refactor: refactor sentiment analyzer by removing dead and slow performance code (#2804)

* refactor: refactor sentiment analyzer by removing dead and slow performance code

* refactor: refactor sentiment analyzer by removing dead and slow performance code

* fix: replace == False checks with not

* refactor: keep the sentiment element (as _sentiment) when unpacking in all_words

Co-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>
12mohaned and tomaarsen committed Sep 16, 2021
1 parent 49e5d6e commit 53dbaa5
Showing 1 changed file with 8 additions and 8 deletions.
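
The pattern removed throughout is equality comparison against a boolean literal. PEP 8 advises against == True / == False (pycodestyle E712), and the equality form also routes through full comparison dispatch, where a bare truthiness test does not. A minimal standalone illustration, not part of the commit:

flag = True

# Unidiomatic: dispatches a full equality comparison on every check.
if flag == True:
    print("equality form")

# Idiomatic: uses the object's truth value directly.
if flag:
    print("truthiness form")

# The == False counterpart becomes a not-test.
if not flag:
    print("not reached, since flag is True")
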
16 changes: 8 additions & 8 deletions nltk/sentiment/sentiment_analyzer.py
@@ -47,10 +47,10 @@ def all_words(self, documents, labeled=None):
         all_words = []
         if labeled is None:
             labeled = documents and isinstance(documents[0], tuple)
-        if labeled == True:
-            for words, sentiment in documents:
+        if labeled:
+            for words, _sentiment in documents:
                 all_words.extend(words)
-        elif labeled == False:
+        elif not labeled:
             for words in documents:
                 all_words.extend(words)
         return all_words
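
For context, a minimal sketch of how all_words behaves for both input shapes after this change; the toy document lists are invented for illustration:

from nltk.sentiment import SentimentAnalyzer

analyzer = SentimentAnalyzer()

# Labeled input: (words, sentiment) tuples; the sentiment element is unpacked
# but unused, hence the _sentiment rename above.
labeled_docs = [(["good", "movie"], "pos"), (["bad", "plot"], "neg")]
print(analyzer.all_words(labeled_docs))  # ['good', 'movie', 'bad', 'plot']

# Unlabeled input: plain lists of words, detected via the isinstance check.
unlabeled_docs = [["good", "movie"], ["bad", "plot"]]
print(analyzer.all_words(unlabeled_docs))  # same output
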
@@ -218,7 +218,7 @@ def evaluate(
             classifier = self.classifier
         print(f"Evaluating {type(classifier).__name__} results...")
         metrics_results = {}
-        if accuracy == True:
+        if accuracy:
             accuracy_score = eval_accuracy(classifier, test_set)
             metrics_results["Accuracy"] = accuracy_score
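
For a fuller picture, a hedged end-to-end sketch of the evaluate path touched here; the toy featuresets are invented for illustration, while SentimentAnalyzer.train and evaluate are the nltk APIs shown in this file:

from nltk.classify import NaiveBayesClassifier
from nltk.sentiment import SentimentAnalyzer

# Toy featuresets; real ones would come from a feature extractor.
train_set = [({"contains(good)": True}, "pos"), ({"contains(bad)": True}, "neg")]
test_set = [({"contains(good)": True}, "pos"), ({"contains(bad)": True}, "neg")]

analyzer = SentimentAnalyzer()
analyzer.train(NaiveBayesClassifier.train, train_set)
# The boolean flags below now take the bare truthiness path.
print(analyzer.evaluate(test_set, accuracy=True, verbose=True))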

@@ -232,22 +232,22 @@ def evaluate(
             test_results[observed].add(i)
 
         for label in labels:
-            if precision == True:
+            if precision:
                 precision_score = eval_precision(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"Precision [{label}]"] = precision_score
-            if recall == True:
+            if recall:
                 recall_score = eval_recall(gold_results[label], test_results[label])
                 metrics_results[f"Recall [{label}]"] = recall_score
-            if f_measure == True:
+            if f_measure:
                 f_measure_score = eval_f_measure(
                     gold_results[label], test_results[label]
                 )
                 metrics_results[f"F-measure [{label}]"] = f_measure_score
 
         # Print evaluation results (in alphabetical order)
-        if verbose == True:
+        if verbose:
             for result in sorted(metrics_results):
                 print(f"{result}: {metrics_results[result]}")

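For reference, eval_precision and eval_recall are the set-based scorers imported from nltk.metrics; they operate on the index sets built above. A minimal sketch with invented index sets:

from nltk.metrics import precision as eval_precision
from nltk.metrics import recall as eval_recall

# gold: indices of test items whose gold label is some label L;
# test: indices the classifier assigned to L.
gold = {0, 1, 2, 3}
test = {1, 2, 4}

print(eval_precision(gold, test))  # 2/3: two of three predictions are correct
print(eval_recall(gold, test))     # 2/4: two of four gold items were retrieved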
