We trained an XGBoost model on a CSV file with two columns: Summary contains free text and Security_Flag contains a 0 or a 1. Training and testing work well. Now we want to classify a new sentence that is not included in the original file. That still works as long as the sentence only uses words known from the original file, but as soon as it contains a completely new word, we get an error message. We tried entering the new sentence in different ways; everything runs, only the last code line throws the error. Please advise. Thanks!
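To illustrate the behavior, here is a minimal standalone example (independent of our script, just scikit-learn's CountVectorizer with made-up words): the vectorizer only knows the words it was fitted on, so refitting on new text changes the number of feature columns.

from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform(["alpha beta", "beta gamma"])  # learns a 3-word vocabulary
print(X.shape[1])  # 3
print(vec.transform(["alpha delta"]).shape[1])  # still 3 - transform() keeps the fitted vocabulary
print(CountVectorizer().fit_transform(["alpha beta", "beta gamma", "alpha delta"]).shape[1])  # 4 - refitting relearns it

Our complete script follows.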
import matplotlib.pyplot as plt
from xgboost import plot_tree
import xgboost as xgb
import pandas as pd
import numpy as np
import pickle
import string
import nltk
import csv
import os
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.datasets import dump_svmlight_file
from sklearn.metrics import precision_score
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def pp(text):
    # tokenize into words
    tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
    # remove stopwords (note: this happens before lowercasing, so capitalized stopwords slip through)
    stop = stopwords.words('german')
    tokens = [token for token in tokens if token not in stop]
    # remove words with fewer than three letters
    tokens = [word for word in tokens if len(word) >= 3]
    # lowercase
    tokens = [word.lower() for word in tokens]
    # lemmatize
    lmtzr = nltk.WordNetLemmatizer()
    tokens = [lmtzr.lemmatize(word) for word in tokens]
    preprocessed_text = ' '.join(tokens)
    return preprocessed_text
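For reference, this is roughly what pp() returns (the exact output depends on the NLTK German stopword list; the punkt, stopwords and wordnet corpora must be downloaded):

# nltk.download('punkt'); nltk.download('stopwords'); nltk.download('wordnet')
print(pp("Das ist ein neuer Satz"))
# -> roughly "das neuer satz"; "ist"/"ein" are dropped as stopwords, while the
#    capitalized "Das" survives because stopwords are removed before lowercasing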
df = pd.read_csv("file03.csv", sep=",", usecols=["Security_Flag","Summary"])
y = df["Security_Flag"]
# from dataframe to array for train test splitting
y = y.values
Z = []
for row in df['Summary']:
    l = pp(row)
    Z.append(l)
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(Z)
X = X.toarray()
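At this point X has one column per word of the fitted vocabulary:

print(X.shape)  # (number_of_documents, vocabulary_size)
print(len(vectorizer.vocabulary_))  # equals X.shape[1]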
#X = pd.DataFrame(data=X[0:,0:])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=41)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
param = {
    'max_depth': 3,  # the maximum depth of each tree
    'eta': 0.3,  # the training step for each iteration
    'silent': 1,  # logging mode - quiet
    'objective': 'multi:softprob',  # multiclass training, outputs one probability per class
    # 'objective': 'binary:logistic',  # binary alternative we also tried
    'num_class': 2}  # the number of classes that exist in this dataset
num_round = 20 # the number of training iterations
bst = xgb.train(param, dtrain, num_round)
preds = bst.predict(dtest)  # shape (n_test, 2): one probability per class
best_preds = np.asarray([np.argmax(line) for line in preds])
stest = xgb.DMatrix([X_test[0]])  # a single known row - predicting this works fine
spred = bst.predict(stest)
print(confusion_matrix(y_test, best_preds))
while True:
    ts = input("Enter a sentence: ")
    ts = pp(ts)
    Z.append(ts)
    Y = vectorizer.fit_transform(Z)
    Y = Y.toarray()
    test = xgb.DMatrix([Y[-1]])
    spred = bst.predict(test)  # <- this is the line that throws the error
"The expected result would be a one or zero. The output is an error message." training data did not have the following fields: f1354, f1355, f1352, f1353