From ce3d3a08094715aa6eee5cb528f4d78a9d40e44d Mon Sep 17 00:00:00 2001
From: Daniel Ecer <de-code@users.noreply.github.com>
Date: Fri, 28 Jul 2017 14:40:16 +0100
Subject: [PATCH] fixed: macro should have been micro

---
 .../trainer/models/pix2pix/evaluate.py      | 24 +++++++++----------
 .../trainer/models/pix2pix/evaluate_test.py | 22 ++++++++---------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/sciencebeam_gym/trainer/models/pix2pix/evaluate.py b/sciencebeam_gym/trainer/models/pix2pix/evaluate.py
index 2cdb358..03d78b3 100644
--- a/sciencebeam_gym/trainer/models/pix2pix/evaluate.py
+++ b/sciencebeam_gym/trainer/models/pix2pix/evaluate.py
@@ -12,9 +12,9 @@ EvaluationTensors = collections.namedtuple(
     "fp",
     "fn",
     "accuracy",
-    "macro_precision",
-    "macro_recall",
-    "macro_f1"
+    "micro_precision",
+    "micro_recall",
+    "micro_f1"
   ]
 )
 
@@ -33,18 +33,18 @@ def _evaluate_from_confusion_matrix(confusion, accuracy=None):
   total_tp = tf.reduce_sum(tp)
   total_fp = tf.reduce_sum(fp)
   total_fn = tf.reduce_sum(fn)
-  macro_precision = total_tp / (total_tp + total_fp)
-  macro_recall = total_tp / (total_tp + total_fn)
-  macro_f1 = 2 * macro_precision * macro_recall / (macro_precision + macro_recall)
+  micro_precision = total_tp / (total_tp + total_fp)
+  micro_recall = total_tp / (total_tp + total_fn)
+  micro_f1 = 2 * micro_precision * micro_recall / (micro_precision + micro_recall)
   return EvaluationTensors(
     confusion_matrix=confusion,
     tp=tp,
     fp=fp,
     fn=fn,
     accuracy=accuracy,
-    macro_precision=macro_precision,
-    macro_recall=macro_recall,
-    macro_f1=macro_f1
+    micro_precision=micro_precision,
+    micro_recall=micro_recall,
+    micro_f1=micro_f1
   )
 
 def evaluate_predictions(labels, predictions, n_classes, has_unknown_class=False):
@@ -80,7 +80,7 @@ def evaluate_separate_channels(targets, outputs, has_unknown_class=False):
 
 
 def evaluation_summary(evaluation_tensors):
-  tf.summary.scalar("macro_precision", evaluation_tensors.macro_precision)
-  tf.summary.scalar("macro_recall", evaluation_tensors.macro_recall)
-  tf.summary.scalar("macro_f1", evaluation_tensors.macro_f1)
+  tf.summary.scalar("micro_precision", evaluation_tensors.micro_precision)
+  tf.summary.scalar("micro_recall", evaluation_tensors.micro_recall)
+  tf.summary.scalar("micro_f1", evaluation_tensors.micro_f1)
   tf.summary.scalar("accuracy", evaluation_tensors.accuracy)
diff --git a/sciencebeam_gym/trainer/models/pix2pix/evaluate_test.py b/sciencebeam_gym/trainer/models/pix2pix/evaluate_test.py
index 976b977..5330211 100644
--- a/sciencebeam_gym/trainer/models/pix2pix/evaluate_test.py
+++ b/sciencebeam_gym/trainer/models/pix2pix/evaluate_test.py
@@ -31,21 +31,21 @@ def test_evaluate_predictions():
     assert np.array_equal(session.run(evaluation_tensors.tp), np.array([1, 1, 0, 2]))
     assert np.array_equal(session.run(evaluation_tensors.fp), np.array([0, 1, 1, 0]))
     assert np.array_equal(session.run(evaluation_tensors.fn), np.array([0, 0, 1, 1]))
-    expected_macro_precision = 4.0 / (4 + 2)
-    expected_macro_recall = 4.0 / (4 + 2)
-    expected_macro_f1 = (
-      2 * expected_macro_precision * expected_macro_recall /
-      (expected_macro_precision + expected_macro_recall)
+    expected_micro_precision = 4.0 / (4 + 2)
+    expected_micro_recall = 4.0 / (4 + 2)
+    expected_micro_f1 = (
+      2 * expected_micro_precision * expected_micro_recall /
+      (expected_micro_precision + expected_micro_recall)
     )
     assert _scalar_close(
-      session.run(evaluation_tensors.macro_precision),
-      expected_macro_precision
+      session.run(evaluation_tensors.micro_precision),
+      expected_micro_precision
     )
     assert _scalar_close(
-      session.run(evaluation_tensors.macro_recall),
-      expected_macro_recall
+      session.run(evaluation_tensors.micro_recall),
+      expected_micro_recall
     )
     assert _scalar_close(
-      session.run(evaluation_tensors.macro_f1),
-      expected_macro_f1
+      session.run(evaluation_tensors.micro_f1),
+      expected_micro_f1
     )
-- 
GitLab
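
Note: the rename is correct because the metrics above pool tp/fp/fn across all
classes before taking the ratios, which is micro-averaging; macro-averaging
would instead compute the per-class ratios first and then average them, and
the two generally differ. A minimal NumPy sketch (not part of the patch,
purely illustrative) using the per-class counts from the test above:

import numpy as np

# Per-class counts taken from test_evaluate_predictions above.
tp = np.array([1.0, 1.0, 0.0, 2.0])
fp = np.array([0.0, 1.0, 1.0, 0.0])

# Micro: pool the counts across classes, then take the ratio.
micro_precision = tp.sum() / (tp.sum() + fp.sum())  # 4 / (4 + 2) = 0.666...

# Macro: take the per-class ratio first, then average the ratios.
macro_precision = (tp / (tp + fp)).mean()  # mean(1.0, 0.5, 0.0, 1.0) = 0.625

print(micro_precision, macro_precision)

The micro value matches expected_micro_precision in the test, while the macro
value does not, which is what the patch subject refers to.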