Added code for ap50 calculation

Merged Oluwaseun Akinmukomi requested to merge added-ap50-implementation into master
1 file  + 65  − 39
@@ -29,6 +29,40 @@ from datasets import ImageDataset, Dataset, bbox_iou
 from visualizations import visualize_fms, visualize_predictions, visualize_seed_expansion
 from object_discovery import lost, detect_box, dino_seg
 
+
+def voc_ap(rec, prec, use_07_metric=False):
+    """
+    Taken from https://github.com/valeoai/LOST/blob/fcedbecb644f18358a660ce58c739cc6374feda8/tools/evaluate_unsupervised_detection_voc.py#L46
+
+    Compute VOC AP given precision and recall. If use_07_metric is true, uses
+    the VOC 07 11-point method (default: False).
+    """
+    if use_07_metric:
+        # 11-point metric
+        ap = 0.0
+        for t in np.arange(0.0, 1.1, 0.1):
+            if np.sum(rec >= t) == 0:
+                p = 0
+            else:
+                p = np.max(prec[rec >= t])
+            ap = ap + p / 11.0
+    else:
+        # correct AP calculation
+        # first append sentinel values at the end
+        mrec = np.concatenate(([0.0], rec, [1.0]))
+        mpre = np.concatenate(([0.0], prec, [0.0]))
+
+        # compute the precision envelope
+        for i in range(mpre.size - 1, 0, -1):
+            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+        # to calculate area under PR curve, look for points
+        # where X axis (recall) changes value
+        i = np.where(mrec[1:] != mrec[:-1])[0]
+
+        # and sum (\Delta recall) * prec
+        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+    return ap
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser("Unsupervised object discovery with LOST.")
     parser.add_argument(
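As a reviewer aid (not part of this MR): a quick sanity check of both branches of voc_ap on a toy precision/recall curve. It assumes numpy is imported as np and the voc_ap defined above is in scope; the input values are illustrative.

import numpy as np

# toy PR curve: recall grows while precision decays
rec = np.array([0.1, 0.4, 0.7, 1.0])
prec = np.array([1.0, 0.8, 0.6, 0.5])

# VOC07 11-point interpolation, the branch this MR calls (use_07_metric=True)
print(voc_ap(rec, prec, use_07_metric=True))   # approx. 0.69
# exact area under the precision envelope
print(voc_ap(rec, prec, use_07_metric=False))  # approx. 0.67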
@@ -81,7 +115,7 @@ if __name__ == "__main__":
     parser.add_argument("--no_hard", action="store_true", help="Only used in the case of the VOC_all setup (see the paper).")
     parser.add_argument("--no_evaluation", action="store_true", help="Compute the evaluation.")
     parser.add_argument("--save_predictions", default=True, type=bool, help="Save predicted bouding boxes.")
-    parser.add_argument("--num_init_seeds", default=1, type=int, help="Number of initial seeds to expand from.")
+    parser.add_argument("--num_init_seeds", default=50, type=int, help="Number of initial seeds to expand from.")
     # Visualization
     parser.add_argument(
@@ -118,7 +152,7 @@ if __name__ == "__main__":
     if args.image_path is not None:
         args.save_predictions = False
-        args.no_evaluation = True
+        args.no_evaluation = False
         args.dataset = None
     # -------------------------------------------------------------------------------------------------------
@@ -169,7 +203,12 @@ if __name__ == "__main__":
     gt_dict = {}
     cnt = 0
     corloc = np.zeros(len(dataset.dataloader))
+
+    total_true_positives = []
+    total_false_positives = []
+    total_gt_boxes = 0
+
     pbar = tqdm(dataset.dataloader)
     for im_id, inp in enumerate(pbar):
         torch.cuda.empty_cache()
@@ -206,6 +245,7 @@ if __name__ == "__main__":
         if not args.no_evaluation:
             gt_bbxs, gt_cls = dataset.extract_gt(inp[1], im_name)
+
             if gt_bbxs is not None:
                 # Discard images with no gt annotations
                 # Happens only in the case of VOC07 and VOC12
@@ -334,48 +374,34 @@ if __name__ == "__main__":
            if args.no_evaluation:
                continue
-            # Initialize variables for AP50 calculation
-            tp = 0
-            fp = 0
-            total_gt_boxes = len(gt_bbxs)
-            ap50 = 0
-            # Compare prediction to GT boxes
-            for pred in preds:
-                if len(gt_bbxs) == 0:
-                    break  # TODO: should do something else, should skip iou but count towards FP if pred exists
-                ious = bbox_iou(torch.from_numpy(pred), torch.from_numpy(np.asarray(gt_bbxs)))
-                # TODO: This calculates the corloc
-                if torch.any(ious >= 0.50):
-                    #corloc[im_id] = 1
-                    corloc[im_id] = 0
-                    for i in ious:
-                        if i >= 0.50:
-                            corloc[im_id] += 1
-                # Count true positives and false positives at IoU threshold of 0.5
-                if torch.any(ious >= 0.50):
-                    tp += 1
-                else:
-                    fp += 1
-            cnt += len(gt_bbxs)
-            if cnt % 50 == 0:
-                pbar.set_description(f"Found {int(np.sum(corloc))}/{cnt}")
-            # Calculate precision and recall at IoU threshold of 0.5
-            precision = tp / (tp + fp)
-            recall = tp / total_gt_boxes
-            # Calculate AP50 as average precision at IoU threshold of 0.5
-            ap50 = precision * recall
-            print(f"AP50: {ap50:.2f}")
+            nd = len(gt_bbxs)
+            total_gt_boxes += nd
+            if len(preds) == 0:
+                continue
+            tp = [0] * nd
+            fp = [0] * nd
+            for idx, gt in enumerate(gt_bbxs):
+                for idy, pred in enumerate(preds):
+                    iou = bbox_iou(torch.from_numpy(pred), torch.from_numpy(gt))
+                    if iou >= 0.50:
+                        tp[idx] = 1
+                        break
+                else:
+                    fp[idx] = 1
+            total_true_positives.extend(tp)
+            total_false_positives.extend(fp)
+
+    # compute precision recall
+    total_false_positives = np.cumsum(total_false_positives)
+    total_true_positives = np.cumsum(total_true_positives)
+    rec = total_true_positives / float(total_gt_boxes)
+    # avoid divide by zero in case the first detection matches a difficult
+    # ground truth
+    prec = total_true_positives / np.maximum(total_true_positives + total_false_positives, np.finfo(np.float64).eps)
+    ap = voc_ap(rec, prec, use_07_metric=True)
+    print("AP: %f" % ap)
 
     # Save predicted bounding boxes
     if args.save_predictions:
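Review note (not part of the diff): the merged matching is per ground-truth box, i.e. a GT box counts as a true positive if any prediction overlaps it with IoU >= 0.5 and as a false positive otherwise, whereas the standard VOC protocol ranks detections by confidence and assigns TP/FP per prediction. Below is a minimal, self-contained sketch of the merged pipeline on toy boxes; iou_xyxy is a hypothetical stand-in for the repo's bbox_iou, and voc_ap is the function added above.

import numpy as np

def iou_xyxy(a, b):
    # IoU of two [x1, y1, x2, y2] boxes
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

# two toy "images": (predictions, ground-truth boxes)
images = [
    ([[0, 0, 10, 10]], [[1, 1, 10, 10], [20, 20, 30, 30]]),
    ([[0, 0, 8, 8], [5, 5, 9, 9]], [[0, 0, 8, 8]]),
]

total_tp, total_fp, total_gt = [], [], 0
for preds, gts in images:
    total_gt += len(gts)
    tp, fp = [0] * len(gts), [0] * len(gts)
    for idx, gt in enumerate(gts):
        # as in the diff: a GT box is a TP if any prediction reaches
        # IoU >= 0.5 with it, otherwise it is logged as a FP
        if any(iou_xyxy(pred, gt) >= 0.5 for pred in preds):
            tp[idx] = 1
        else:
            fp[idx] = 1
    total_tp.extend(tp)
    total_fp.extend(fp)

tp_cum = np.cumsum(total_tp)
fp_cum = np.cumsum(total_fp)
rec = tp_cum / float(total_gt)
prec = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps)
print(voc_ap(rec, prec, use_07_metric=True))  # approx. 0.55 on this toy data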