19 Authors: Giacomo Colosio, Sebastiano Colosio, Patrizio Acquadro, Tito Nicola Drugman
29 from pathlib
import Path
30 import tensorflow
as tf
31 import tensorflow.keras.backend
as K
34 from hydra.core.hydra_config
import HydraConfig
35 from omegaconf
import DictConfig
36 from typing
import Optional
38 from src.postprocessing
import spe_postprocess, heatmaps_spe_postprocess, yolo_mpe_postprocess
39 from src.preprocessing
import apply_rescaling
40 from src.utils
import ai_runner_invoke
41 from common.utils
import count_h5_parameters, log_to_file, \
42 ai_runner_interp, ai_interp_input_quant, ai_interp_outputs_dequant
43 from .metrics
import single_pose_oks, multi_pose_oks_mAP, compute_ap
# NOTE(review): this chunk is a garbled extraction — statements are split across
# lines, original source line numbers are fused into the text, and several
# original lines are missing from this view (docstring delimiters, `else:`
# branches, the initialisation of `metric` / `images_full` / `predictions_all`,
# the spe/heatmaps/yolo post-processing calls, and the `ai_runner_invoke`
# calls). Comments below describe only what the visible tokens establish;
# anything beyond that is hedged. Code tokens are left byte-identical.
#
# Purpose: evaluate a Keras (.h5), TFLite (.tflite) or ONNX (.onnx) pose
# estimation model on `eval_ds`, computing OKS (single-pose) or OKS-based mAP
# (multi-pose), and log results to mlflow and a log file.
47 def evaluate(cfg: DictConfig =
None, eval_ds: tf.data.Dataset =
None,
48 model_path_to_evaluate: Optional[str] =
None,
49 name_ds: Optional[str] =
'test_set') ->
None:
51 Evaluates and benchmarks a TensorFlow Lite or Keras model, and generates a Config header file if specified.
54 cfg (config): The configuration file.
55 eval_ds (tf.data.Dataset): The validation dataset.
56 model_path_to_evaluate (str, optional): Model path to evaluate
57 name_ds (str): The name of the chosen test_data to be mentioned in the prints and figures.
# --- Resolve paths and configuration ------------------------------------
# Explicit model path argument wins over cfg.general.model_path.
62 output_dir = HydraConfig.get().runtime.output_dir
63 model_path = model_path_to_evaluate
if model_path_to_evaluate
else os.path.realpath(cfg.general.model_path)
64 name_model = os.path.basename(model_path)
65 model_type = cfg.general.model_type
66 num_threads = cfg.general.num_threads_tflite
67 if cfg.evaluation
and cfg.evaluation.target:
68 target = cfg.evaluation.target
71 file_extension = Path(model_path).suffix
# Rescale dataset pixels with the configured scale/offset (the tflite branch
# below inverts this rescaling before integer quantization).
74 eval_ds =
apply_rescaling(dataset=eval_ds, scale=cfg.preprocessing.rescaling.scale,
75 offset=cfg.preprocessing.rescaling.offset)
# --- Load the model according to its file extension ---------------------
79 if file_extension ==
'.h5':
80 count_h5_parameters(output_dir=output_dir,
81 model_path=model_path)
82 model = tf.keras.models.load_model(model_path)
84 elif file_extension ==
'.tflite':
85 interpreter_quant = tf.lite.Interpreter(model_path=model_path,num_threads=num_threads)
86 interpreter_quant.allocate_tensors()
87 input_details = interpreter_quant.get_input_details()
88 outputs_details = interpreter_quant.get_output_details()
# Resize the interpreter input to the dataset's batch shape, then
# re-allocate (resize_tensor_input invalidates prior allocation).
89 shape_to_resize = list(eval_ds.take(1).as_numpy_iterator())[0][0].shape
90 interpreter_quant.resize_tensor_input(input_details[0][
'index'], shape_to_resize)
91 interpreter_quant.allocate_tensors()
92 elif file_extension ==
'.onnx':
93 sess = onnxruntime.InferenceSession(model_path)
94 inputs = sess.get_inputs()
95 outputs = sess.get_outputs()
# Unsupported extension (the guarding `else:` line is missing from this view).
97 raise ValueError(f
"Model accuracy evaluation failed\nReceived model path: {model_path}")
# ST Edge AI runner interpreter, used when `target` is an stedgeai_* target.
99 ai_runner_interpreter = ai_runner_interp(target,name_model)
# --- Accumulators for the yolo_mpe mAP statistics ------------------------
# tp/conf/maskpad are concatenated per batch below; nb_gt accumulation is
# in lines missing from this view.
102 nb_batches = len(eval_ds)
103 tp, conf, nb_gt, maskpad =
None,
None,
None,
None
# --- Main evaluation loop: one inference + metric update per batch -------
106 for images,labels
in tqdm.tqdm(eval_ds, total=nb_batches):
107 if Path(model_path).suffix ==
'.h5':
108 predictions = model.predict_on_batch(images)
109 elif Path(model_path).suffix ==
'.tflite':
111 image_processed = images.numpy()
# Undo the dataset rescaling ((x - offset) / scale) to get back raw
# pixel values, then round/clip into the integer input dtype range.
113 if input_details[0][
'dtype'] == np.uint8:
114 image_processed = (image_processed - cfg.preprocessing.rescaling.offset) / cfg.preprocessing.rescaling.scale
115 image_processed = np.clip(np.round(image_processed), np.iinfo(input_details[0][
'dtype']).min, np.iinfo(input_details[0][
'dtype']).max)
116 elif input_details[0][
'dtype'] == np.int8:
117 image_processed = (image_processed - cfg.preprocessing.rescaling.offset) / cfg.preprocessing.rescaling.scale
# Shift by -128 to map the uint8 pixel domain into int8 before clipping.
118 image_processed -= 128
119 image_processed = np.clip(np.round(image_processed), np.iinfo(input_details[0][
'dtype']).min, np.iinfo(input_details[0][
'dtype']).max)
120 elif input_details[0][
'dtype'] == np.float32:
121 image_processed = image_processed
# Unrecognized input dtype: report and fall through (the `else:` line
# is missing from this view); `imags` is still cast below.
123 print(
'[ERROR] : input dtype not recognized -> ',input_details[0][
'dtype'])
125 imags = image_processed.astype(input_details[0][
'dtype'])
# Optionally collect quantized inputs for the .npy dump done after the loop.
# NOTE(review): `images_full` initialisation is not visible in this chunk.
127 if "evaluation" in cfg
and cfg.evaluation:
128 if "gen_npy_input" in cfg.evaluation
and cfg.evaluation.gen_npy_input==
True:
129 images_full.append(imags)
# Host-side TFLite inference; keeps only the first output tensor.
# NOTE(review): the `if target == ...` guard pairing with the elif at
# original line 134 is missing from this view.
131 interpreter_quant.set_tensor(input_details[0][
'index'], imags)
132 interpreter_quant.invoke()
133 predictions = [interpreter_quant.get_tensor(outputs_details[j][
"index"])
for j
in range(len(outputs_details))][0]
# On-target / stedgeai inference path: quantize input for the runner,
# run it (invoke line missing from this view), dequantize outputs.
134 elif target ==
'stedgeai_host' or target ==
'stedgeai_n6':
135 data = ai_interp_input_quant(ai_runner_interpreter,images.numpy(),cfg.preprocessing.rescaling.scale, cfg.preprocessing.rescaling.offset,
'.tflite')
137 predictions = ai_interp_outputs_dequant(ai_runner_interpreter,predictions)[0]
# Optionally collect raw predictions for the .npy dump after the loop.
# NOTE(review): `predictions_all` initialisation is not visible here.
138 if "evaluation" in cfg
and cfg.evaluation:
139 if "gen_npy_output" in cfg.evaluation
and cfg.evaluation.gen_npy_output==
True:
140 predictions_all.append(predictions)
142 elif Path(model_path).suffix ==
'.onnx':
# ONNX expects NCHW: transpose from the dataset's NHWC layout.
143 t_images = tf.transpose(images,[0,3,1,2]).numpy()
145 predictions = sess.run([o.name
for o
in outputs], {inputs[0].name: t_images.astype(
'float32')})[0]
146 elif target ==
'stedgeai_host' or target ==
'stedgeai_n6':
147 data = ai_interp_input_quant(ai_runner_interpreter,t_images,cfg.preprocessing.rescaling.scale, cfg.preprocessing.rescaling.offset,
'.onnx')
149 predictions = ai_interp_outputs_dequant(ai_runner_interpreter,predictions)[0]
151 predictions = tf.cast(predictions,tf.float32)
# --- Per-model-type metric update ------------------------------------
# NOTE(review): the post-processing calls producing `oks` / `poses`
# (presumably heatmaps_spe_postprocess / spe_postprocess /
# yolo_mpe_postprocess + the OKS metrics imported above) are in lines
# missing from this view; `metric` initialisation is also not visible.
153 if model_type==
'heatmaps_spe':
156 metric += tf.reduce_mean(oks)
157 elif model_type==
'spe':
160 metric += tf.reduce_mean(oks)
161 elif model_type==
'yolo_mpe':
163 max_output_size=cfg.postprocessing.max_detection_boxes,
164 iou_threshold=cfg.postprocessing.NMS_thresh,
165 score_threshold=cfg.postprocessing.confidence_thresh)
# ONNX poses are in pixel units: divide by a per-channel mask built
# from the image width to normalise (4 box coords, 1 score, then
# x,y,conf per 17 keypoints, judging by the constant's layout).
166 if Path(model_path).suffix ==
'.onnx':
167 mask_s = tf.constant([images.shape[2]]*4 + [1.] + [images.shape[2],images.shape[2],1.]*17)
168 poses /= mask_s[
None,
None]
# Keep only detections with confidence > 0, then accumulate
# tp/conf/maskpad across batches (first-batch assignment lines are
# missing from this view; `oks` is assumed to be a tuple of
# (tp, conf, ..., maskpad) tensors — confirm against multi_pose_oks_mAP).
172 tdet_ind = tf.where(oks[1]>0)[:,0]
174 ttp = tf.gather(oks[0],tdet_ind)
175 tconf = tf.gather(oks[1],tdet_ind)
177 tmaskpad = tf.gather(oks[3],tdet_ind)
186 tp = tf.concat([tp,ttp],0)
187 conf = tf.concat([conf,tconf],0)
189 maskpad = tf.concat([maskpad,tmaskpad],0)
# Unknown model type (guarding `else:` missing from this view).
191 print(
'No post-processing found for the model type : '+model_type)
# --- Optional .npy dump of collected quantized inputs --------------------
194 if "evaluation" in cfg
and cfg.evaluation:
195 if "gen_npy_input" in cfg.evaluation
and cfg.evaluation.gen_npy_input==
True:
196 if "npy_in_name" in cfg.evaluation
and cfg.evaluation.npy_in_name:
197 npy_in_name = cfg.evaluation.npy_in_name
199 npy_in_name =
"unknown_npy_in_name"
200 images_full = np.concatenate(images_full, axis=0)
201 print(
"[INFO] : Shape of npy input dataset = {}".format(images_full.shape))
202 np.save(os.path.join(output_dir, f
"{npy_in_name}.npy"), images_full)
# --- Optional .npy dump of collected raw predictions ---------------------
205 if "evaluation" in cfg
and cfg.evaluation:
206 if "gen_npy_output" in cfg.evaluation
and cfg.evaluation.gen_npy_output==
True:
207 if "npy_out_name" in cfg.evaluation
and cfg.evaluation.npy_out_name:
208 npy_out_name = cfg.evaluation.npy_out_name
210 npy_out_name =
"unknown_npy_out_name"
211 predictions_all = np.concatenate(predictions_all, axis=0)
212 np.save(os.path.join(output_dir, f
"{npy_out_name}.npy"), predictions_all)
# --- Report & log metrics -------------------------------------------------
# Single-pose models: mean OKS (metric is assumed to have been averaged over
# batches in a line missing from this view — it is printed as a percentage).
214 if model_type
in [
'heatmaps_spe',
'spe']:
216 print(
"The mean OKS is : {:.2f}%".format(metric.numpy()*100))
# Prefix the logged metric name by model kind: float (.h5), quantized
# (.tflite), or generic otherwise.
218 if file_extension ==
'.h5':
219 mlflow.log_metric(
"float_OKS", metric.numpy()*100)
220 log_to_file(output_dir,
"float_model_OKS : {:.2f}%".format(metric.numpy()*100))
221 elif file_extension ==
'.tflite':
222 mlflow.log_metric(
"quantized_OKS", metric.numpy()*100)
223 log_to_file(output_dir,
"quantized_model_OKS : {:.2f}%".format(metric.numpy()*100))
225 mlflow.log_metric(
"OKS", metric.numpy()*100)
226 log_to_file(output_dir,
"model_OKS : {:.2f}%".format(metric.numpy()*100))
# Multi-pose model: compute AP across IoU/OKS thresholds from the
# accumulated statistics; metric[0] is mAP@0.5, mean(metric) is
# mAP@[0.5:0.95].
227 elif model_type==
'yolo_mpe':
228 metric =
compute_ap(tp, conf, nb_gt, maskpad, cfg.postprocessing.plot_metrics)
229 print(
'mAP@0.5 -> {:.2f}%'.format(metric[0]*100))
230 print(
'mAP@[0.5:0.95] -> {:.2f}%'.format(np.mean(metric)*100))
232 if file_extension ==
'.h5':
233 mlflow.log_metric(
"float_mAP_0.5", metric[0]*100)
234 mlflow.log_metric(
"float_mAP_0.5_0.95", np.mean(metric)*100)
235 log_to_file(output_dir,
"float_model_mAP@0.5 -> {:.2f}%".format(metric[0]*100))
236 log_to_file(output_dir,
"float_model_mAP@[0.5:0.95] -> {:.2f}%".format(np.mean(metric)*100))
237 elif file_extension ==
'.tflite':
238 mlflow.log_metric(
"quantized_mAP_0.5", metric[0]*100)
239 mlflow.log_metric(
"quantized_mAP_0.5_0.95", np.mean(metric)*100)
240 log_to_file(output_dir,
"quantized_model_mAP@0.5 -> {:.2f}%".format(metric[0]*100))
241 log_to_file(output_dir,
"quantized_model_mAP@[0.5:0.95] -> {:.2f}%".format(np.mean(metric)*100))
243 mlflow.log_metric(
"mAP_0.5", metric[0]*100)
244 mlflow.log_metric(
"mAP_0.5_0.95", np.mean(metric)*100)
245 log_to_file(output_dir,
"model_mAP@0.5 -> {:.2f}%".format(metric[0]*100))
246 log_to_file(output_dir,
"model_mAP@[0.5:0.95] -> {:.2f}%".format(np.mean(metric)*100))
# Unknown model type (guarding `else:` missing from this view).
248 print(
'No metric found for the model type : '+model_type)
None evaluate(DictConfig cfg=None, tf.data.Dataset eval_ds=None, Optional[str] model_path_to_evaluate=None, Optional[str] name_ds='test_set')
def compute_ap(tf.Tensor tp, tf.Tensor conf, tf.Tensor nb_gt, tf.Tensor maskpad, bool plot_metrics)
def multi_pose_oks_mAP(tf.Tensor y_true, tf.Tensor y_pred)
def single_pose_oks(tf.Tensor y_true, tf.Tensor y_pred)
def ai_runner_invoke(image_processed, ai_runner_interpreter)
def heatmaps_spe_postprocess(tf.Tensor tensor)
def yolo_mpe_postprocess(tf.Tensor tensor, int max_output_size=20, float iou_threshold=0.7, float score_threshold=0.25)
def spe_postprocess(tf.Tensor tensor)
def apply_rescaling(tf.data.Dataset dataset=None, float scale=None, float offset=None)