# Code (Python) — poem language-model training-data preparation
import numpy as np
import tensorflow as tf
from tensorflow import keras
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Load the poetry dataset
# One poem per line; explicit UTF-8 so Chinese text decodes the same on
# every platform instead of depending on the locale's default encoding.
with open('poems.txt', 'r', encoding='utf-8') as f:
    poems = f.read().splitlines()
# Prepare training data
# Build sliding-window samples: each input X is `max_length` consecutive
# tokens and the target y is the single token that follows that window.
max_length = 100
X = []
y = []
# NOTE(review): `sequences` is not defined anywhere in this file as shown —
# presumably it is the tokenized/integer-encoded form of `poems`; confirm
# the missing tokenization step upstream.
for seq in sequences:
    # Sequences shorter than max_length + 1 contribute no samples,
    # because range() over a non-positive length is empty.
    for i in range(len(seq) - max_length):
        X.append(seq[i:i + max_length])
        y.append(seq[i + max_length])