This repository was archived by the owner on Dec 30, 2025. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy patheval.py
More file actions
105 lines (82 loc) · 4.42 KB
/
eval.py
File metadata and controls
105 lines (82 loc) · 4.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
# -*- coding: utf-8 -*-
import sys
from evaluation.eval_proposal import ANETproposal
from evaluation.eval_detection import ANETdetection
import matplotlib.pyplot as plt
import numpy as np
import json
from ipdb import set_trace
def load_json(file):
    """Parse *file* as JSON and return the resulting object."""
    with open(file) as fp:
        return json.load(fp)
def write_results(log, path="./ap_result.txt"):
    """Write the result string *log* to a text file.

    Args:
        log: string to write.
        path: destination file path. Defaults to "./ap_result.txt",
            matching the previously hard-coded location, so existing
            callers are unaffected.
    """
    with open(path, "w") as f:
        f.write(log)
def run_proposal_evaluation(ground_truth_filename, proposal_filename,
                            max_avg_nr_proposals=100,
                            tiou_thresholds=np.linspace(0.5, 0.95, 10),
                            subset='validation', assign_class=None):
    """Evaluate temporal proposals with ANETproposal.

    Args:
        ground_truth_filename: path to the ground-truth annotation file.
        proposal_filename: path to the proposal results file.
        max_avg_nr_proposals: cap on average proposals per video.
        tiou_thresholds: tIoU thresholds used for recall computation.
        subset: dataset subset to evaluate on.
        assign_class: optional class restriction passed through to the
            evaluator (semantics defined by ANETproposal).

    Returns:
        Tuple (average_nr_proposals, average_recall, recall, auc_rate)
        taken from the evaluator after ``evaluate()`` runs.
    """
    evaluator = ANETproposal(
        ground_truth_filename,
        proposal_filename,
        tiou_thresholds=tiou_thresholds,
        max_avg_nr_proposals=max_avg_nr_proposals,
        subset=subset,
        verbose=True,
        check_status=False,
        assign_class=assign_class,
    )
    evaluator.evaluate()
    return (evaluator.proposals_per_video,
            evaluator.avg_recall,
            evaluator.recall,
            evaluator.auc_rate)
def run_detection_evaluation(ground_truth_filename, detection_filename,
                             tiou_thresholds=np.linspace(0.5, 0.95, 10),
                             subset='validation', assign_class=None):
    """Evaluate detection results with ANETdetection.

    Args:
        ground_truth_filename: path to the ground-truth annotation file.
        detection_filename: path to the detection results file.
        tiou_thresholds: tIoU thresholds used for mAP computation.
        subset: dataset subset to evaluate on.
        assign_class: optional class restriction passed through to the
            evaluator (semantics defined by ANETdetection).

    Returns:
        None. The evaluator prints its metrics itself (verbose=True).
    """
    evaluator = ANETdetection(
        ground_truth_filename,
        detection_filename,
        tiou_thresholds=tiou_thresholds,
        subset=subset,
        verbose=True,
        check_status=False,
        assign_class=assign_class,
    )
    evaluator.evaluate()
def plot_metric(opt, average_nr_proposals, average_recall, recall, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
    """Plot recall vs. average number of proposals and save the figure.

    Args:
        opt: dict with key "save_fig_path" giving the output image path.
        average_nr_proposals: 1-D array, average proposals per video (x axis).
        average_recall: 1-D array, recall averaged over tIoU thresholds.
        recall: 2-D array of per-threshold recall curves; assumed shape
            (len(tiou_thresholds), len(average_nr_proposals)) — TODO confirm
            against ANETproposal's output.
        tiou_thresholds: tIoU thresholds matching recall's first axis.
    """
    fn_size = 14
    plt.figure(num=None, figsize=(12, 8))
    ax = plt.subplot(1, 1, 1)
    colors = ['k', 'r', 'yellow', 'b', 'c', 'm', 'b', 'pink', 'lawngreen', 'indigo']
    # Area under each per-threshold recall curve.
    area_under_curve = np.zeros_like(tiou_thresholds)
    for i in range(recall.shape[0]):
        area_under_curve[i] = np.trapz(recall[i], average_nr_proposals)
    # Plot every second tIoU threshold to keep the legend readable.
    for idx, tiou in enumerate(tiou_thresholds[::2]):
        ax.plot(average_nr_proposals, recall[2 * idx, :], color=colors[idx + 1],
                label="tiou=[" + str(tiou) + "], area=" + str(int(area_under_curve[2 * idx] * 100) / 100.),
                linewidth=4, linestyle='--', marker=None)
    # Plots Average Recall vs Average number of proposals.
    ax.plot(average_nr_proposals, average_recall, color=colors[0],
            label="tiou = 0.5:0.05:0.95," + " area=" + str(
                int(np.trapz(average_recall, average_nr_proposals) * 100) / 100.),
            linewidth=4, linestyle='-', marker=None)
    handles, labels = ax.get_legend_handles_labels()
    # Move the aggregate (last-plotted) curve to the front of the legend.
    ax.legend([handles[-1]] + handles[:-1], [labels[-1]] + labels[:-1], loc='best')
    plt.ylabel('Average Recall', fontsize=fn_size)
    plt.xlabel('Average Number of Proposals per Video', fontsize=fn_size)
    # BUG FIX: the 'b=' keyword was deprecated in matplotlib 3.5 and later
    # removed; the supported keyword is 'visible='.
    plt.grid(visible=True, which="both")
    plt.ylim([0, 1.0])
    # BUG FIX: plt.axes() creates a NEW empty axes on top of the plot in
    # modern matplotlib (it no longer returns the current axes); style the
    # tick labels of the axes we actually drew on instead.
    plt.setp(ax.get_xticklabels(), fontsize=fn_size)
    plt.setp(ax.get_yticklabels(), fontsize=fn_size)
    plt.savefig(opt["save_fig_path"])
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close()
def evaluation_proposal(opt):
    """Run proposal evaluation on the validation subset and print AR@k.

    Args:
        opt: dict with keys "video_anno" (ground-truth annotation path)
            and "proposals_result_file" (proposal results path).

    Returns:
        The AUC rate reported by the proposal evaluator.
    """
    proposals_per_video, avg_recall, recall, auc_rate = run_proposal_evaluation(
        opt["video_anno"],
        opt["proposals_result_file"],
        max_avg_nr_proposals=100,
        tiou_thresholds=np.linspace(0.5, 0.95, 10),
        subset='validation')
    # plot_metric(opt, proposals_per_video, avg_recall, recall)
    # AR@k = mean recall (over tIoU thresholds) at the k-th proposal count.
    for name, col in (("AR@1", 0), ("AR@5", 4), ("AR@10", 9), ("AR@100", -1)):
        print(name + " is \t", np.mean(recall[:, col]))
    return auc_rate
def evaluation_detection(opt, assign_class=None):
    """Run detection evaluation on the validation subset.

    Args:
        opt: dict with keys "video_anno" (ground-truth annotation path)
            and "detection_result_file" (detection results path).
        assign_class: optional class restriction forwarded to the evaluator.
    """
    run_detection_evaluation(
        opt["video_anno"],
        opt["detection_result_file"],
        subset='validation',
        assign_class=assign_class,
    )