# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import evaluate import datasets import motmetrics as mm import numpy as np _CITATION = """\ @InProceedings{huggingface:module, title = {A great new module}, authors={huggingface, Inc.}, year={2020} }\ @article{milan2016mot16, title={MOT16: A benchmark for multi-object tracking}, author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad}, journal={arXiv preprint arXiv:1603.00831}, year={2016} } """ _DESCRIPTION = """\ The MOT Metrics module is designed to evaluate multi-object tracking (MOT) algorithms by computing various metrics based on predicted and ground truth bounding boxes. It serves as a crucial tool in assessing the performance of MOT systems, aiding in the iterative improvement of tracking algorithms.""" _KWARGS_DESCRIPTION = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of predictions to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. max_iou (`float`, *optional*): If specified, this is the minimum Intersection over Union (IoU) threshold to consider a detection as a true positive. Default is 0.5. """ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class MotMetrics(evaluate.Metric): """TODO: Short description of my evaluation module.""" def _info(self): # TODO: Specifies the evaluate.EvaluationModuleInfo object return evaluate.MetricInfo( # This is the description that will appear on the modules page. module_type="metric", description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, # This defines the format of each prediction and reference features=datasets.Features({ "predictions": datasets.Sequence( datasets.Sequence(datasets.Value("float")) ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("float")) ) }), # Additional links to the codebase or references codebase_urls=["http://github.com/path/to/codebase/of/new_module"], reference_urls=["http://path.to.reference.url/new_module"] ) def _download_and_prepare(self, dl_manager): """Optional: download external resources useful to compute the scores""" # TODO: Download external resources if needed pass def _compute(self, payload, max_iou: float = 0.5, debug: bool = False): """Returns the scores""" # TODO: Compute the different scores of the module return calculate_from_payload(payload, max_iou, debug) #return calculate(predictions, references, max_iou) def calculate(predictions, references, max_iou: float = 0.5): """Returns the scores""" try: np_predictions = np.array(predictions) except: raise ValueError("The predictions should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height, confidence]") try: np_references = np.array(references) except: raise ValueError("The references should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height]") if np_predictions.shape[1] != 7: raise ValueError("The predictions should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height, confidence]") if np_references.shape[1] != 6: raise ValueError("The references should be a list of np.arrays in the format [frame number, object id, bb_left, bb_top, bb_width, bb_height]") if np_predictions[:, 0].min() <= 0: raise ValueError("The frame number in the predictions should be a positive integer") if np_references[:, 0].min() <= 0: raise ValueError("The frame number in the references should be a positive integer") num_frames = int(max(np_references[:, 0].max(), np_predictions[:, 0].max())) acc = mm.MOTAccumulator(auto_id=True) for i in range(1, num_frames+1): preds = np_predictions[np_predictions[:, 0] == i, 1:6] refs = np_references[np_references[:, 0] == i, 1:6] C = mm.distances.iou_matrix(refs[:,1:], preds[:,1:], max_iou = max_iou) acc.update(refs[:,0].astype('int').tolist(), preds[:,0].astype('int').tolist(), C) mh = mm.metrics.create() summary = mh.compute(acc).to_dict() for key in summary: summary[key] = summary[key][0] return summary def calculate_from_payload(payload: dict, max_iou: float = 0.5, debug: bool = False): if not isinstance(payload, dict): try: payload = payload.to_dict() except Exception as e: raise ValueError( "The payload should be a dictionary or a compatible object" ) from e gt_field_name = payload['gt_field_name'] models = payload['models'] sequence_list = payload['sequence_list'] if debug: print("gt_field_name: ", gt_field_name) print("models: ", models) print("sequence_list: ", sequence_list) output = {} for sequence in sequence_list: output[sequence] = {} frames = payload['sequences'][sequence][gt_field_name] formated_references = [] for frame_id, frame in enumerate(frames): for detection in frame: id = detection['index'] x, y, w, h = detection['bounding_box'] formated_references.append([frame_id+1, id, x, y, w, h]) for model in models: frames = payload['sequences'][sequence][model] formated_predictions = [] for frame_id, frame in enumerate(frames): for detection in frame: id = detection['index'] x, y, w, h = detection['bounding_box'] confidence = detection['confidence'] confidence = 1 #TODO: remove this line formated_predictions.append([frame_id+1, id, x, y, w, h, confidence]) if debug: print("sequence/model: ", sequence, model) print("formated_predictions: ", formated_predictions) print("formated_references: ", formated_references) if len(formated_predictions) == 0: output[sequence][model] = "Model had no predictions." elif len(formated_references) == 0: output[sequence][model] = "No ground truth." else: output[sequence][model] = calculate(formated_predictions, formated_references, max_iou=max_iou) return output