Using a trained model to predict tags - Python and Keras
I have a dataset with two columns, post and tags ("some text", "tag"), and I have successfully trained a model with 98% accuracy. The problem is: how can I now input some other text and let the model predict which tag it should get? I've searched through tutorials, but none of them show how the model is tested with data outside the dataset, such as a free-text input, so the model can make a prediction (a few include testing, but it is not applicable to this example). This is what I have so far:
import keras
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, Model
from keras.layers import Input, Dense, Dropout, Embedding, LSTM, Flatten
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# load the data and encode the tags as integer codes
data = pd.read_csv('dataset3.csv')
print(data.head(10))
print(data.tags.value_counts())

data['target'] = data.tags.astype('category').cat.codes
data['num_words'] = data.post.apply(lambda x: len(x.split()))

bins = [0, 50, 75, np.inf]
data['bins'] = pd.cut(data.num_words, bins=[0, 100, 300, 500, 800, np.inf],
                      labels=['0-100', '100-300', '300-500', '500-800', '>800'])
word_distribution = data.groupby('bins').size().reset_index().rename(columns={0: 'counts'})
word_distribution.head()

num_class = len(np.unique(data.tags.values))
y = data['target'].values

# tokenize the posts and pad every sequence to the same length
MAX_LENGTH = 500
tokenizer = Tokenizer()
tokenizer.fit_on_texts(data.post.values)
post_seq = tokenizer.texts_to_sequences(data.post.values)
post_seq_padded = pad_sequences(post_seq, maxlen=MAX_LENGTH)

X_train, X_test, y_train, y_test = train_test_split(post_seq_padded, y, test_size=0.05)

vocab_size = len(tokenizer.word_index) + 1

# simple embedding + dense classifier
inputs = Input(shape=(MAX_LENGTH, ))
embedding_layer = Embedding(vocab_size, 128, input_length=MAX_LENGTH)(inputs)
x = Flatten()(embedding_layer)
x = Dense(32, activation='relu')(x)
predictions = Dense(num_class, activation='softmax')(x)

model = Model(inputs=[inputs], outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.summary()

# save the best weights seen during training
filepath = "weights-simple.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')

history = model.fit([X_train], y=to_categorical(y_train), batch_size=64, verbose=1,
                    validation_split=0.25, shuffle=True, epochs=5, callbacks=[checkpointer])

# plot training vs. validation accuracy
df = pd.DataFrame({'epochs': history.epoch,
                   'accuracy': history.history['acc'],
                   'validation_accuracy': history.history['val_acc']})
g = sns.pointplot(x="epochs", y="accuracy", data=df, fit_reg=False)
g = sns.pointplot(x="epochs", y="validation_accuracy", data=df, fit_reg=False, color='green')

# evaluate on the held-out split
predicted = model.predict(X_test)
predicted = np.argmax(predicted, axis=1)
accuracy_score(y_test, predicted)
What I have tried:
I need to input a text and have the model predict which tag it belongs to. I've tried searching online for similar models, but all of them are explained only up to the training stage, not testing with external data. I'm new to Keras and machine learning, but it fascinates me and I want to learn; I'm just stuck. I know these are the basics, but I really don't know what to do next.
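My best guess so far is something like the code below, reusing the same tokenizer and pad_sequences that were used for training and then mapping the predicted index back to a tag name via data.tags.astype('category').cat.categories, but I'm not sure this is the right approach (the sample text and the new_text / predicted_tag names are just made up for illustration):

# a new, unseen post I want to classify
new_text = ["Some new post text that I want to get a tag for"]

# convert the raw text with the SAME tokenizer that was fit on data.post
new_seq = tokenizer.texts_to_sequences(new_text)
new_seq_padded = pad_sequences(new_seq, maxlen=MAX_LENGTH)

# predict class probabilities and take the most likely class index
probabilities = model.predict(new_seq_padded)
predicted_index = np.argmax(probabilities, axis=1)[0]

# map the numeric code back to the original tag string
# (cat.categories is in the same order as the cat.codes used for 'target')
tag_names = data.tags.astype('category').cat.categories
predicted_tag = tag_names[predicted_index]
print(predicted_tag)

Is this how it is supposed to be done, or is there a proper way to test the model on new text?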
I would appreciate any help.
Thank you.