diff --git a/nltk/sentiment/sentiment_analyzer.py b/nltk/sentiment/sentiment_analyzer.py
index 35bc810d7c..1660e2f841 100644
--- a/nltk/sentiment/sentiment_analyzer.py
+++ b/nltk/sentiment/sentiment_analyzer.py
@@ -47,10 +47,10 @@ def all_words(self, documents, labeled=None):
         all_words = []
         if labeled is None:
             labeled = documents and isinstance(documents[0], tuple)
-        if labeled == True:
-            for words, sentiment in documents:
+        if labeled:
+            for words, _sentiment in documents:
                 all_words.extend(words)
-        elif labeled == False:
+        elif not labeled:
             for words in documents:
                 all_words.extend(words)
         return all_words
@@ -218,7 +218,7 @@ def evaluate(
             classifier = self.classifier
         print(f"Evaluating {type(classifier).__name__} results...")
         metrics_results = {}
-        if accuracy == True:
+        if accuracy:
             accuracy_score = eval_accuracy(classifier, test_set)
             metrics_results["Accuracy"] = accuracy_score
 
@@ -232,22 +232,22 @@ def evaluate(
                 test_results[observed].add(i)
 
         for label in labels:
-            if precision == True:
+            if precision:
                 precision_score = eval_precision(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"Precision [{label}]"] = precision_score
-            if recall == True:
+            if recall:
                 recall_score = eval_recall(gold_results[label], test_results[label])
                 metrics_results[f"Recall [{label}]"] = recall_score
-            if f_measure == True:
+            if f_measure:
                 f_measure_score = eval_f_measure(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"F-measure [{label}]"] = f_measure_score
 
         # Print evaluation results (in alphabetical order)
-        if verbose == True:
+        if verbose:
             for result in sorted(metrics_results):
                 print(f"{result}: {metrics_results[result]}")
 
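For reviewers: a minimal sketch of how the two refactored methods can be exercised, assuming NLTK is installed. The toy documents and feature sets are made up for illustration, and it assumes `evaluate` returns its `metrics_results` dict as in the current source. Because the keyword flags are plain booleans, the truthiness checks take the same branches as the old `== True`/`== False` comparisons.

```python
# Minimal sanity-check sketch (not part of the patch); toy data is illustrative.
from nltk.classify import NaiveBayesClassifier
from nltk.sentiment import SentimentAnalyzer

analyzer = SentimentAnalyzer()

# all_words(): `labeled` is auto-detected from the shape of the first document,
# so both the labeled and unlabeled paths still flatten the token lists.
labeled_docs = [(["great", "plot"], "pos"), (["boring", "plot"], "neg")]
unlabeled_docs = [["great", "plot"], ["boring", "plot"]]
assert analyzer.all_words(labeled_docs) == ["great", "plot", "boring", "plot"]
assert analyzer.all_words(unlabeled_docs) == ["great", "plot", "boring", "plot"]

# evaluate(): accuracy/precision/recall/f_measure default to True, so the
# truthiness checks exercise the same metric computations as before.
train_set = [({"contains(great)": True}, "pos"), ({"contains(boring)": True}, "neg")]
test_set = [({"contains(great)": True}, "pos"), ({"contains(boring)": True}, "neg")]
classifier = NaiveBayesClassifier.train(train_set)
metrics = analyzer.evaluate(test_set, classifier=classifier, verbose=True)
print(metrics["Accuracy"])  # assumes evaluate() returns the metrics dict
```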