Keep the most informative features from NLTK NaiveBayesClassifier in the list

I am trying to use NLTK's Naive Bayes classifier in Python:

classifier = nltk.NaiveBayesClassifier.train(train_set)
print "Naive Bayes Accuracy " + str(nltk.classify.accuracy(classifier, test_set)*100)
classifier.show_most_informative_features(5)

It produces the following output:

[console output: the "Most Informative Features" table printed by show_most_informative_features, listing each word with its spam : important likelihood ratio]

You can clearly see which words fall under the "important" category and which belong to the "spam" category, but I cannot work with these values as printed. I really need a list that looks like this:

[[pass,important],[respective,spam],[investment,spam],[internet,spam],[understands,spam]]

I am new to Python and have trouble putting all this together myself. Can anyone help? I would be very grateful.

3 answers

You can slightly modify the source code of show_most_informative_features to suit your task.

Each sub-list then holds a feature name together with its predominant label (the label under which that feature value is most probable).

For example:

def show_most_informative_features_in_list(classifier, n=10):
    """
    Return a nested list of the "most informative" features
    used by the classifier, along with their predominant labels.
    """
    cpdist = classifier._feature_probdist       # probability distribution for feature values given labels
    feature_list = []
    for (fname, fval) in classifier.most_informative_features(n):
        def labelprob(l):
            return cpdist[l, fname].prob(fval)
        # sort the candidate labels by how probable this feature value is under each
        labels = sorted([l for l in classifier._labels if fval in cpdist[l, fname].samples()],
                        key=labelprob)
        feature_list.append([fname, labels[-1]])    # keep the most probable label
    return feature_list

If you train a classifier over the positive/negative movie review corpus in nltk and then call:

show_most_informative_features_in_list(classifier, 10)

[out]:

[['outstanding', 'pos'],
 ['ludicrous', 'neg'],
 ['avoids', 'pos'],
 ['astounding', 'pos'],
 ['idiotic', 'neg'],
 ['atrocious', 'neg'],
 ['offbeat', 'pos'],
 ['fascination', 'pos'],
 ['symbol', 'pos'],
 ['animators', 'pos']]
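
Applied to the asker's spam/important classifier (a sketch, assuming the classifier trained in the question), the same helper returns pairs in exactly the requested format:

# hypothetical usage with the classifier from the question;
# expected shape: [['pass', 'important'], ['respective', 'spam'], ...]
print(show_most_informative_features_in_list(classifier, 5))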

In short, you can simply use most_informative_features().

First, train a classifier over the movie review corpus in NLTK/Python:

import string
from itertools import chain

from nltk.corpus import movie_reviews as mr
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.classify import NaiveBayesClassifier as nbc
import nltk

stop = stopwords.words('english')
# each document is (list of filtered tokens, category taken from the file path)
documents = [([w for w in mr.words(i) if w.lower() not in stop and w.lower() not in string.punctuation],
              i.split('/')[0])
             for i in mr.fileids()]

# use the 100 most frequent words as features
# (FreqDist.keys() is not frequency-ordered in NLTK 3, so use most_common)
word_features = FreqDist(chain(*[i for i, j in documents]))
word_features = [word for word, count in word_features.most_common(100)]

numtrain = int(len(documents) * 90 / 100)
train_set = [({i:(i in tokens) for i in word_features}, tag) for tokens,tag in documents[:numtrain]]
test_set = [({i:(i in tokens) for i in word_features}, tag) for tokens,tag in documents[numtrain:]]

classifier = nbc.train(train_set)

Then:

print(classifier.most_informative_features())

[out]:

[('turturro', True),
 ('inhabiting', True),
 ('taboo', True),
 ('conflicted', True),
 ('overacts', True),
 ('rescued', True),
 ('stepdaughter', True),
 ('apologizing', True),
 ('pup', True),
 ('inform', True)]

To get all of the features:

classifier.most_informative_features(n=len(word_features))

[out]:

[('turturro', True),
 ('inhabiting', True),
 ('taboo', True),
 ('conflicted', True),
 ('overacts', True),
 ('rescued', True),
 ('stepdaughter', True),
 ('apologizing', True),
 ('pup', True),
 ('inform', True),
 ('commercially', True),
 ('utilize', True),
 ('gratuitous', True),
 ('visible', True),
 ('internet', True),
 ('disillusioned', True),
 ('boost', True),
 ('preventing', True),
 ('built', True),
 ('repairs', True),
 ('overplaying', True),
 ('election', True),
 ('caterer', True),
 ('decks', True),
 ('retiring', True),
 ('pivot', True),
 ('outwitting', True),
 ('solace', True),
 ('benches', True),
 ('terrorizes', True),
 ('billboard', True),
 ('catalogue', True),
 ('clean', True),
 ('skits', True),
 ('nice', True),
 ('feature', True),
 ('must', True),
 ('withdrawn', True),
 ('indulgence', True),
 ('tribal', True),
 ('freeman', True),
 ('must', False),
 ('nice', False),
 ('feature', False),
 ('gratuitous', False),
 ('turturro', False),
 ('built', False),
 ('internet', False),
 ('rescued', False),
 ('clean', False),
 ('overacts', False),
 ('gregor', False),
 ('conflicted', False),
 ('taboo', False),
 ('inhabiting', False),
 ('utilize', False),
 ('churns', False),
 ('boost', False),
 ('stepdaughter', False),
 ('complementary', False),
 ('gleiberman', False),
 ('skylar', False),
 ('kirkpatrick', False),
 ('hardship', False),
 ('election', False),
 ('inform', False),
 ('disillusioned', False),
 ('visible', False),
 ('commercially', False),
 ('frosted', False),
 ('pup', False),
 ('apologizing', False),
 ('freeman', False),
 ('preventing', False),
 ('nutsy', False),
 ('intrinsics', False),
 ('somalia', False),
 ('coordinators', False),
 ('strengthening', False),
 ('impatience', False),
 ('subtely', False),
 ('426', False),
 ('schreber', False),
 ('brimley', False),
 ('motherload', False),
 ('creepily', False),
 ('perturbed', False),
 ('accountants', False),
 ('beringer', False),
 ('scrubs', False),
 ('1830s', False),
 ('analogue', False),
 ('espouses', False),
 ('xv', False),
 ('skits', False),
 ('solace', False),
 ('reduncancy', False),
 ('parenthood', False),
 ('insulators', False),
 ('mccoll', False)]

To clarify the return types:

>>> type(classifier.most_informative_features(n=len(word_features)))
list
>>> type(classifier.most_informative_features(10)[0][1])
bool
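
Note that most_informative_features() alone gives (feature name, feature value) pairs rather than the (word, label) pairs the question asks for. A minimal sketch to keep only the words whose presence (value True) is informative, assuming the boolean features built above:

# keep only features whose informative value is True (word present)
top_words = [fname for fname, fval in classifier.most_informative_features(20) if fval]
print(top_words)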

Interestingly, the feature values do not have to be booleans: if you train with other values, most_informative_features() returns those values instead, e.g. string-valued features:

import string
from itertools import chain

from nltk.corpus import movie_reviews as mr
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from nltk.classify import NaiveBayesClassifier as nbc
import nltk

stop = stopwords.words('english')
# same corpus preparation as above
documents = [([w for w in mr.words(i) if w.lower() not in stop and w.lower() not in string.punctuation],
              i.split('/')[0])
             for i in mr.fileids()]

word_features = FreqDist(chain(*[i for i, j in documents]))
word_features = [word for word, count in word_features.most_common(100)]

numtrain = int(len(documents) * 90 / 100)
# string feature values ('positive'/'negative') instead of booleans
train_set = [({i: 'positive' if (i in tokens) else 'negative' for i in word_features}, tag) for tokens, tag in documents[:numtrain]]
test_set = [({i: 'positive' if (i in tokens) else 'negative' for i in word_features}, tag) for tokens, tag in documents[numtrain:]]

classifier = nbc.train(train_set)

>>> classifier.most_informative_features(10)
[('turturro', 'positive'),
 ('inhabiting', 'positive'),
 ('conflicted', 'positive'),
 ('taboo', 'positive'),
 ('overacts', 'positive'),
 ('rescued', 'positive'),
 ('stepdaughter', 'positive'),
 ('pup', 'positive'),
 ('apologizing', 'positive'),
 ('inform', 'positive')]

>>> type(classifier.most_informative_features(10)[0][1])
str

The most informative (i.e. most discriminative) words for a naive Bayes model are those with the largest difference in p(word | class) between the classes.

First, build a frequency distribution for each class: collect every word that occurs in a class A document into one list, classAWords, and every word that occurs in a class B document into another, classBWords. Then:

classA_freq_distribution = nltk.FreqDist(classAWords)
classB_freq_distribution = nltk.FreqDist(classBWords)
# take the 3000 most frequent words per class
# (FreqDist.keys() is not frequency-ordered, so use most_common)
classA_word_features = [w for w, c in classA_freq_distribution.most_common(3000)]
classB_word_features = [w for w, c in classB_freq_distribution.most_common(3000)]

This gives you the 3000 most frequent features from each class, though you can choose a different cutoff. With a frequency distribution per class you can estimate p(word | class) and then look at the differences between the two classes.

import pandas as pd

diff = []
features = []
for feature in classA_word_features:
    features.append(feature)
    # difference in relative frequency: p(word | classB) - p(word | classA)
    diff.append(classB_freq_distribution[feature] / len(classBWords)
                - classA_freq_distribution[feature] / len(classAWords))

all_features = pd.DataFrame({
    'Feature': features,
    'Diff': diff
})

Then you can sort and view the words with the highest and lowest values.

sorted_features = all_features.sort_values(by=['Diff'], ascending=False)
print(sorted_features)
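
Since the frame is sorted by Diff in descending order, the head holds the words most characteristic of class B and the tail the words most characteristic of class A (a sketch, using the hypothetical classAWords/classBWords lists assumed above):

print(sorted_features.head(10))    # most characteristic of class B
print(sorted_features.tail(10))    # most characteristic of class A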

Source: https://habr.com/ru/post/1015958/

