From 53dbaa5591003f6764a3d69834e92bc83e3a754c Mon Sep 17 00:00:00 2001
From: mohaned mashaly <30902228+12mohaned@users.noreply.github.com>
Date: Thu, 16 Sep 2021 23:40:50 +0200
Subject: [PATCH] refactor: refactor sentiment analyzer by removing dead and slow perfomance code (#2804)

* refactor: refactor sentiment analyzer by removing dead and slow perfomance code

* refactor: refactor sentiment analyzer by removing dead and slow perfomance code

* fix: add not to false boolean values

* Refactor: Add sentiment keyword in all_words

Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>
---
 nltk/sentiment/sentiment_analyzer.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/nltk/sentiment/sentiment_analyzer.py b/nltk/sentiment/sentiment_analyzer.py
index 35bc810d7c..1660e2f841 100644
--- a/nltk/sentiment/sentiment_analyzer.py
+++ b/nltk/sentiment/sentiment_analyzer.py
@@ -47,10 +47,10 @@ def all_words(self, documents, labeled=None):
         all_words = []
         if labeled is None:
             labeled = documents and isinstance(documents[0], tuple)
-        if labeled == True:
-            for words, sentiment in documents:
+        if labeled:
+            for words, _sentiment in documents:
                 all_words.extend(words)
-        elif labeled == False:
+        elif not labeled:
             for words in documents:
                 all_words.extend(words)
         return all_words
@@ -218,7 +218,7 @@ def evaluate(
             classifier = self.classifier
         print(f"Evaluating {type(classifier).__name__} results...")
         metrics_results = {}
-        if accuracy == True:
+        if accuracy:
             accuracy_score = eval_accuracy(classifier, test_set)
             metrics_results["Accuracy"] = accuracy_score
 
@@ -232,22 +232,22 @@ def evaluate(
                 test_results[observed].add(i)
 
         for label in labels:
-            if precision == True:
+            if precision:
                 precision_score = eval_precision(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"Precision [{label}]"] = precision_score
-            if recall == True:
+            if recall:
                 recall_score = eval_recall(gold_results[label], test_results[label])
                 metrics_results[f"Recall [{label}]"] = recall_score
-            if f_measure == True:
+            if f_measure:
                 f_measure_score = eval_f_measure(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"F-measure [{label}]"] = f_measure_score
 
         # Print evaluation results (in alphabetical order)
-        if verbose == True:
+        if verbose:
             for result in sorted(metrics_results):
                 print(f"{result}: {metrics_results[result]}")
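
The substantive change above replaces explicit "== True" / "== False" comparisons with plain truthiness tests. This matters in all_words() because "labeled = documents and isinstance(documents[0], tuple)" can evaluate to a non-boolean value (for example, an empty list when documents is empty), which neither "== True" nor "== False" matches, so neither branch would run. The following minimal sketch of the patched all_words() logic is illustrative only (not from the NLTK source; the sample documents list is made up):

    # Hypothetical sample input, not taken from the patch or NLTK test data.
    documents = [(["great", "movie"], "pos"), (["boring"], "neg")]

    all_words = []
    # Mirrors the patched all_words(): infer whether documents carry labels.
    labeled = documents and isinstance(documents[0], tuple)
    if labeled:                 # truthiness test instead of "labeled == True"
        for words, _sentiment in documents:
            all_words.extend(words)
    elif not labeled:           # "not labeled" instead of "labeled == False"
        for words in documents:
            all_words.extend(words)

    print(all_words)  # ['great', 'movie', 'boring']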