Building a Chatbot with a Seq2Seq Model

from IPython.display import Image

Overview of the Seq2Seq Model

Image('https://wikidocs.net/images/page/24996/%EC%9D%B8%EC%BD%94%EB%8D%94%EB%94%94%EC%BD%94%EB%8D%94%EB%AA%A8%EB%8D%B8.PNG')

We download the library needed for the dataset.

Korpora is a collection of Korean natural language processing datasets.

Installation command

# !pip install Korpora
  • Among them, we download KoreanChatbotKorpus, the chatbot dataset.
  • We train the chatbot model on the KoreanChatbotKorpus dataset.
  • Each example consists of text and pair.
  • The question is text, and the answer is pair.
from Korpora import KoreanChatbotKorpus
corpus = KoreanChatbotKorpus()

The sample texts show that the data is written in a colloquial style.

corpus.get_all_texts()[:10]

get_all_pairs() returns examples whose text and pair form a question/answer pair.

corpus.get_all_pairs()[0].text
corpus.get_all_pairs()[0].pair

Data Preprocessing

We separate the data into question and answer.

question is the dataset used for the queries, and answer is the dataset used for the responses.

texts = []
pairs = []

for sentence in corpus.get_all_pairs():
    texts.append(sentence.text)
    pairs.append(sentence.pair)
list(zip(texts, pairs))[:5]

Removing special characters

We remove all special characters except Korean characters and digits.

[Note] This tutorial removes special characters and English letters, but for a real project this choice should be made carefully.

English letters and special characters appear very frequently in chatbot conversations, so decide selectively which symbols and letters to remove before running the preprocessing.
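
As an illustration of such a selective policy, the sketch below keeps English letters and a small whitelist of punctuation in addition to Korean and digits. The whitelist here is only an example, not part of this tutorial's pipeline.

import re

# Hypothetical, more permissive cleaner: keeps Korean, digits,
# English letters, and a small example whitelist of symbols (? ! .).
# Adjust the character class to fit your own project's data.
def clean_sentence_keep_english(sentence):
    return re.sub(r'[^0-9a-zA-Zㄱ-ㅎㅏ-ㅣ가-힣?!. ]', '', sentence)

# clean_sentence_keep_english('abcef가나다^^$%@12시 땡^^!??')
# -> 'abcef가나다12시 땡!??'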

# The re module is used to apply regular expressions.
import re
def clean_sentence(sentence):
    # Remove every character except Korean characters, digits, and spaces.
    sentence = re.sub(r'[^0-9ㄱ-ㅎㅏ-ㅣ가-힣 ]', r'', sentence)
    return sentence

Examples

As shown below, every character other than Korean and digits is removed.

clean_sentence('12시 땡^^!??')
clean_sentence('abcef가나다^^$%@12시 땡^^!??')

Korean Morphological Analyzer (KoNLPy)

We use a morphological analyzer to split sentences into morphemes.

가방에 들어가신다 -> 가방/NNG + 에/JKM + 들어가/VV + 시/EPH + ㄴ다/EFN

  • Morphological analysis identifies linguistic structure such as morphemes, roots, prefixes/suffixes, and parts of speech (POS).
  • Using a konlpy morphological analyzer makes tokenizing Korean sentences considerably more effective.

Official documentation

Installation

# !pip install konlpy

konlpy ships with several morphological analyzers, such as Kkma, Okt, and Twitter; this tutorial uses Okt.

from konlpy.tag import Okt
okt = Okt()
# Helper used for morpheme segmentation:
# pass the Korean sentence to be converted into okt.morphs.
def process_morph(sentence):
    return ' '.join(okt.morphs(sentence))
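
Applying process_morph to the earlier example sentence shows the effect; the exact segmentation depends on the analyzer, and Okt splits more coarsely than the Kkma-style analysis shown above.

# The segmentation result is joined with spaces; the exact split
# may vary with the installed konlpy/Okt version.
process_morph('가방에 들어가신다')
# e.g. '가방 에 들어가신다'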

To train a Seq2Seq model, we construct the following three datasets:

  • question: the encoder input dataset (the full query).
  • answer_input: the decoder input dataset (the start of the answer). A START token is prepended to each sentence.
  • answer_output: the decoder output dataset (the end of the answer). An END token is appended to each sentence.
def clean_and_morph(sentence, is_question=True):
    # Clean the Korean sentence
    sentence = clean_sentence(sentence)
    # Morpheme segmentation
    sentence = process_morph(sentence)
    # Handle questions and answers separately.
    if is_question:
        return sentence
    else:
        # Prepend START for the decoder input and append END for the decoder output.
        return ('<START> ' + sentence, sentence + ' <END>')
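
A quick check of both branches (the segmented outputs shown in the comments are illustrative and depend on Okt):

# Question branch: a single cleaned, morpheme-segmented string.
clean_and_morph('12시 땡!', is_question=True)
# e.g. '12시 땡'
# Answer branch: a (decoder input, decoder output) tuple with the markers attached.
clean_and_morph('하루가 또 가네요.', is_question=False)
# e.g. ('<START> 하루 가 또 가네요', '하루 가 또 가네요 <END>')
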
def preprocess(texts, pairs):
    questions = []
    answer_in = []
    answer_out = []

    # Preprocess the questions
    for text in texts:
        # Clean and segment into morphemes
        question = clean_and_morph(text, is_question=True)
        questions.append(question)

    # Preprocess the answers
    for pair in pairs:
        # Clean and segment into morphemes
        in_, out_ = clean_and_morph(pair, is_question=False)
        answer_in.append(in_)
        answer_out.append(out_)

    return questions, answer_in, answer_out
questions, answer_in, answer_out = preprocess(texts, pairs)
questions[:5]
answer_in[:5]
answer_out[:5]
all_sentences = questions + answer_in + answer_out
# Rough vocabulary-size estimate: count unique whitespace-separated tokens.
a = ' '.join(all_sentences).split()
len(set(a))

Tokenization

import numpy as np
import warnings
import tensorflow as tf

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Ignore warnings
warnings.filterwarnings('ignore')

Defining the tokenizer

tokenizer = Tokenizer(filters='', lower=False, oov_token='<OOV>')

The Tokenizer builds a word-index vocabulary (word dictionary) from the sentences.

tokenizer.fit_on_texts(all_sentences)
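
Because filters='' and lower=False were passed above, the <START> and <END> markers survive fitting unchanged and receive their own indices (Keras assigns the OOV token index 1). A quick sanity check:

# The special tokens must be present in the vocabulary, since
# training and inference look up their indices later.
tokenizer.word_index['<OOV>'], tokenizer.word_index['<START>'], tokenizer.word_index['<END>']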

Printing 10 entries of the word dictionary

for word, idx in tokenizer.word_index.items():
    print(f'{word}\t\t => \t{idx}')
    if idx >= 10:
        break

Checking the number of tokens

len(tokenizer.word_index)

Conversion: encoding text into sequences (texts_to_sequences)

question_sequence = tokenizer.texts_to_sequences(questions)
answer_in_sequence = tokenizer.texts_to_sequences(answer_in)
answer_out_sequence = tokenizer.texts_to_sequences(answer_out)

Padding sentences to a uniform length (pad_sequences)

MAX_LENGTH = 30
question_padded = pad_sequences(question_sequence, maxlen=MAX_LENGTH, truncating='post', padding='post')
answer_in_padded = pad_sequences(answer_in_sequence, maxlen=MAX_LENGTH, truncating='post', padding='post')
answer_out_padded = pad_sequences(answer_out_sequence, maxlen=MAX_LENGTH, truncating='post', padding='post')
question_padded.shape
answer_in_padded.shape, answer_out_padded.shape

Model

from tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint

Encoder for training

class Encoder(tf.keras.Model):
    def __init__(self, units, vocab_size, embedding_dim, time_steps):
        super(Encoder, self).__init__()
        self.embedding = Embedding(vocab_size, embedding_dim, input_length=time_steps)
        self.dropout = Dropout(0.2)
        self.lstm = LSTM(units, return_state=True)
        
    def call(self, inputs):
        x = self.embedding(inputs)
        x = self.dropout(x)
        x, hidden_state, cell_state = self.lstm(x)
        return [hidden_state, cell_state]
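
A quick smoke test with placeholder sizes (not the training hyperparameters used later) confirms that the encoder returns one hidden state and one cell state per batch element:

# Dummy forward pass: a batch of 2 integer sequences of length 30.
enc = Encoder(units=128, vocab_size=1000, embedding_dim=100, time_steps=30)
h, c = enc(tf.zeros((2, 30), dtype=tf.int32))
h.shape, c.shape
# (TensorShape([2, 128]), TensorShape([2, 128]))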

Decoder for training

class Decoder(tf.keras.Model):
    def __init__(self, units, vocab_size, embedding_dim, time_steps):
        super(Decoder, self).__init__()
        self.embedding = Embedding(vocab_size, embedding_dim, input_length=time_steps)
        self.dropout = Dropout(0.2)
        self.lstm = LSTM(units, 
                         return_state=True, 
                         return_sequences=True, 
                        )
        self.dense = Dense(vocab_size, activation='softmax')
    
    def call(self, inputs, initial_state):
        x = self.embedding(inputs)
        x = self.dropout(x)
        x, hidden_state, cell_state = self.lstm(x, initial_state=initial_state)        
        x = self.dense(x)
        return x, hidden_state, cell_state
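
The decoder can be smoke-tested the same way; it consumes the encoder's [hidden_state, cell_state] as its initial state and emits a softmax over the vocabulary at every time step (placeholder sizes again):

dec = Decoder(units=128, vocab_size=1000, embedding_dim=100, time_steps=30)
# Zero states stand in for the encoder's context vector here.
init_state = [tf.zeros((2, 128)), tf.zeros((2, 128))]
out, h, c = dec(tf.zeros((2, 30), dtype=tf.int32), initial_state=init_state)
out.shape
# TensorShape([2, 30, 1000]): one distribution over the vocabulary per step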

Combining the models

class Seq2Seq(tf.keras.Model):
    def __init__(self, units, vocab_size, embedding_dim, time_steps, start_token, end_token):
        super(Seq2Seq, self).__init__()
        self.start_token = start_token
        self.end_token = end_token
        self.time_steps = time_steps
        
        self.encoder = Encoder(units, vocab_size, embedding_dim, time_steps)
        self.decoder = Decoder(units, vocab_size, embedding_dim, time_steps)
        
    def call(self, inputs, training=True):
        if training:
            encoder_inputs, decoder_inputs = inputs
            context_vector = self.encoder(encoder_inputs)
            decoder_outputs, _, _ = self.decoder(inputs=decoder_inputs, initial_state=context_vector)
            return decoder_outputs
        else:
            context_vector = self.encoder(inputs)
            target_seq = tf.constant([[self.start_token]], dtype=tf.int32)  # token ids are integer indices
            results = tf.TensorArray(tf.int32, self.time_steps)
            
            for i in tf.range(self.time_steps):
                decoder_output, decoder_hidden, decoder_cell = self.decoder(target_seq, initial_state=context_vector)
                decoder_output = tf.cast(tf.argmax(decoder_output, axis=-1), dtype=tf.int32)
                decoder_output = tf.reshape(decoder_output, shape=(1, 1))
                results = results.write(i, decoder_output)
                
                if decoder_output == self.end_token:
                    break
                    
                target_seq = decoder_output
                context_vector = [decoder_hidden, decoder_cell]
                
            return tf.reshape(results.stack(), shape=(1, self.time_steps))
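
Training uses teacher forcing: the decoder receives the ground-truth answer (prefixed with START) as input. At inference, the model instead feeds each predicted token back in as the next decoder input, one step at a time, until END or time_steps is reached. A minimal smoke test with placeholder sizes and arbitrary start/end token ids:

model = Seq2Seq(units=128, vocab_size=1000, embedding_dim=100,
                time_steps=30, start_token=2, end_token=3)
enc_in = tf.zeros((2, 30), dtype=tf.int32)
dec_in = tf.zeros((2, 30), dtype=tf.int32)
# Training mode: parallel decoding with teacher forcing.
model([enc_in, dec_in], training=True).shape   # (2, 30, 1000)
# Inference mode: greedy step-by-step decoding for a single question.
model(tf.zeros((1, 30), dtype=tf.int32), training=False).shape  # (1, 30)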

Applying per-word one-hot encoding

We apply per-word one-hot encoding because the decoder's output (targets) must be supplied as one-hot vectors for the categorical cross-entropy loss used below.

VOCAB_SIZE = len(tokenizer.word_index) + 1
def convert_to_one_hot(padded):
    # Initialize the one-hot tensor
    one_hot_vector = np.zeros((len(padded), MAX_LENGTH, VOCAB_SIZE))

    # Convert the decoder targets to one-hot encoding.
    # During training the inputs are indices, but the outputs are one-hot vectors.
    for i, sequence in enumerate(padded):
        for j, index in enumerate(sequence):
            one_hot_vector[i, j, index] = 1

    return one_hot_vector
answer_in_one_hot = convert_to_one_hot(answer_in_padded)
answer_out_one_hot = convert_to_one_hot(answer_out_padded)
answer_in_one_hot[0].shape, answer_out_one_hot[0].shape
((30, 12638), (30, 12638))
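
As a sanity check, taking the argmax along the last axis recovers the original padded index sequences. Note that the full one-hot tensor is large (roughly len(data) × 30 × VOCAB_SIZE floats); that memory cost is the price of categorical_crossentropy, and sparse_categorical_crossentropy on the raw indices would be a common alternative.

# argmax over the vocabulary axis undoes the one-hot encoding.
np.array_equal(answer_out_one_hot.argmax(axis=-1), answer_out_padded)
# True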

Converting predicted indices back into words

def convert_index_to_text(indices, end_token):

    sentence = ''

    # Iterate over the predicted indices
    for index in indices:
        if index == end_token:
            # Reached the end token, so stop decoding.
            break
        # If the index maps to a word in the dictionary, append the word;
        # indices missing from the dictionary contribute nothing.
        if index > 0 and tokenizer.index_word.get(index) is not None:
            sentence += tokenizer.index_word[index]

        # Add a space
        sentence += ' '
    return sentence
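
For example, decoding the first padded answer back into text stops at the END token (the decoded string shown is illustrative):

# Round-trip check: padded indices -> words, stopping at <END>.
convert_index_to_text(answer_out_padded[0], tokenizer.word_index['<END>'])
# e.g. '하루 가 또 가네요 '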

Training

Defining the hyperparameters

BUFFER_SIZE = 1000
BATCH_SIZE = 16
EMBEDDING_DIM = 100
TIME_STEPS = MAX_LENGTH
START_TOKEN = tokenizer.word_index['<START>']
END_TOKEN = tokenizer.word_index['<END>']

UNITS = 128

VOCAB_SIZE = len(tokenizer.word_index)+1
DATA_LENGTH = len(questions)
SAMPLE_SIZE = 3
NUM_EPOCHS = 20

Creating a checkpoint

checkpoint_path = 'model/seq2seq-chatbot-no-attention-checkpoint.ckpt'
checkpoint = ModelCheckpoint(filepath=checkpoint_path, 
                             save_weights_only=True,
                             save_best_only=True, 
                             monitor='loss', 
                             verbose=1
                            )

Distributed training setup

strategy = tf.distribute.MirroredStrategy()
FLAG = True
if strategy.num_replicas_in_sync > 1 and FLAG:
    MULTIPLE_BATCH = strategy.num_replicas_in_sync
    print(f'Using distributed training >> GPUs: {MULTIPLE_BATCH}')
else:
    print('Not using distributed training')
    MULTIPLE_BATCH = 1
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')
Using distributed training >> GPUs: 2

Creating & compiling the model

# When a distributed environment is available
if MULTIPLE_BATCH > 1:
    print(f'Using distributed training >> GPUs: {MULTIPLE_BATCH}')
    with strategy.scope():
        seq2seq = Seq2Seq(UNITS, VOCAB_SIZE, EMBEDDING_DIM, TIME_STEPS, START_TOKEN, END_TOKEN)
        seq2seq.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
else:
    print('Not using distributed training')
    seq2seq = Seq2Seq(UNITS, VOCAB_SIZE, EMBEDDING_DIM, TIME_STEPS, START_TOKEN, END_TOKEN)
    seq2seq.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
Using distributed training >> GPUs: 2
# To continue a previous run, load the checkpoint and resume training.
# seq2seq.load_weights(checkpoint_path)
def make_prediction(model, question_inputs):
    results = model(inputs=question_inputs, training=False)
    # Flatten the predicted indices into a 1-D array
    results = np.asarray(results).reshape(-1)
    return results
for epoch in range(NUM_EPOCHS):
    print(f'processing epoch: {epoch * 10 + 1}...')
    seq2seq.fit([question_padded, answer_in_padded],
                answer_out_one_hot,
                epochs=10,
                batch_size=BATCH_SIZE*MULTIPLE_BATCH, 
                callbacks=[checkpoint]
               )
    # Draw random sample indices
    samples = np.random.randint(DATA_LENGTH, size=SAMPLE_SIZE)

    # Spot-check prediction quality
    for idx in samples:
        question_inputs = question_padded[idx]
        # Predict the answer sentence
        results = make_prediction(seq2seq, np.expand_dims(question_inputs, 0))
        
        # Convert the predicted indices into text
        results = convert_index_to_text(results, END_TOKEN)
        
        print(f'Q: {questions[idx]}')
        print(f'A: {results}\n')
        print()
processing epoch: 1...
Epoch 1/10
WARNING:tensorflow:From /home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
INFO:tensorflow:batch_all_reduce: 8 all-reduces with algorithm = nccl, num_packs = 1
WARNING:tensorflow:Efficient allreduce is not supported for 2 IndexedSlices
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:GPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1').
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:GPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1').
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:batch_all_reduce: 8 all-reduces with algorithm = nccl, num_packs = 1
WARNING:tensorflow:Efficient allreduce is not supported for 2 IndexedSlices
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:GPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1').
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:GPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1').
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
370/370 [==============================] - ETA: 0s - loss: 1.9503 - acc: 0.8001
Epoch 00001: loss improved from inf to 1.95035, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 1.9503 - acc: 0.8001
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
Epoch 2/10
370/370 [==============================] - ETA: 0s - loss: 1.2335 - acc: 0.8242
Epoch 00002: loss improved from 1.95035 to 1.23354, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 1.2335 - acc: 0.8242
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 1.1606 - acc: 0.8299
Epoch 00003: loss improved from 1.23354 to 1.15987, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 1.1599 - acc: 0.8300
Epoch 4/10
369/370 [============================>.] - ETA: 0s - loss: 1.1127 - acc: 0.8345
Epoch 00004: loss improved from 1.15987 to 1.11258, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 1.1126 - acc: 0.8346
Epoch 5/10
369/370 [============================>.] - ETA: 0s - loss: 1.0734 - acc: 0.8382
Epoch 00005: loss improved from 1.11258 to 1.07332, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 1.0733 - acc: 0.8382
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 1.0335 - acc: 0.8430
Epoch 00006: loss improved from 1.07332 to 1.03355, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 1.0335 - acc: 0.8430
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.9934 - acc: 0.8472
Epoch 00007: loss improved from 1.03355 to 0.99330, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.9933 - acc: 0.8472
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.9560 - acc: 0.8505
Epoch 00008: loss improved from 0.99330 to 0.95586, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.9559 - acc: 0.8506
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.9218 - acc: 0.8538
Epoch 00009: loss improved from 0.95586 to 0.92182, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.9218 - acc: 0.8538
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.8916 - acc: 0.8565
Epoch 00010: loss improved from 0.92182 to 0.89157, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.8916 - acc: 0.8565
Q: 긴 시간 이 흐른 후 면 괜찮아지겠지
A: 잘 않아요 


Q: 정말 다시 돌아 온다면
A: 잘 할 수 있을 거 예요 


Q: 더 알 고 싶어 지는 사람 이야
A: 잘 할 수 있을 거 예요 


processing epoch: 11...
Epoch 1/10
368/370 [============================>.] - ETA: 0s - loss: 0.8634 - acc: 0.8593
Epoch 00001: loss improved from 0.89157 to 0.86313, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.8631 - acc: 0.8594
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.8371 - acc: 0.8617
Epoch 00002: loss improved from 0.86313 to 0.83711, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.8371 - acc: 0.8617
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 0.8123 - acc: 0.8640
Epoch 00003: loss improved from 0.83711 to 0.81240, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.8124 - acc: 0.8640
Epoch 4/10
369/370 [============================>.] - ETA: 0s - loss: 0.7898 - acc: 0.8659
Epoch 00004: loss improved from 0.81240 to 0.78967, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.7897 - acc: 0.8659
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.7681 - acc: 0.8680
Epoch 00005: loss improved from 0.78967 to 0.76797, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.7680 - acc: 0.8680
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.7477 - acc: 0.8699
Epoch 00006: loss improved from 0.76797 to 0.74748, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.7475 - acc: 0.8699
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.7272 - acc: 0.8723
Epoch 00007: loss improved from 0.74748 to 0.72765, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.7277 - acc: 0.8722
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.7092 - acc: 0.8742
Epoch 00008: loss improved from 0.72765 to 0.70934, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.7093 - acc: 0.8742
Epoch 9/10
368/370 [============================>.] - ETA: 0s - loss: 0.6920 - acc: 0.8761
Epoch 00009: loss improved from 0.70934 to 0.69189, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.6919 - acc: 0.8761
Epoch 10/10
369/370 [============================>.] - ETA: 0s - loss: 0.6751 - acc: 0.8784
Epoch 00010: loss improved from 0.69189 to 0.67522, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.6752 - acc: 0.8784
Q: 시험 공부 큰일
A: 마음 이 좀 더 무너져요 


Q: 아부 도 기술 인가 봐
A: 마음 이 좀 더 무너져요 


Q: 참아야 하나
A: 마음 이 좀 더 무너져요 


processing epoch: 21...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.6591 - acc: 0.8804
Epoch 00001: loss improved from 0.67522 to 0.65912, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.6591 - acc: 0.8804
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.6446 - acc: 0.8822
Epoch 00002: loss improved from 0.65912 to 0.64455, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.6445 - acc: 0.8822
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 0.6307 - acc: 0.8839
Epoch 00003: loss improved from 0.64455 to 0.63055, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.6306 - acc: 0.8840
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.6174 - acc: 0.8860
Epoch 00004: loss improved from 0.63055 to 0.61766, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.6177 - acc: 0.8860
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.6053 - acc: 0.8875
Epoch 00005: loss improved from 0.61766 to 0.60515, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.6052 - acc: 0.8875
Epoch 6/10
368/370 [============================>.] - ETA: 0s - loss: 0.5937 - acc: 0.8892
Epoch 00006: loss improved from 0.60515 to 0.59356, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.5936 - acc: 0.8892
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.5820 - acc: 0.8911
Epoch 00007: loss improved from 0.59356 to 0.58186, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5819 - acc: 0.8912
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.5717 - acc: 0.8927
Epoch 00008: loss improved from 0.58186 to 0.57181, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5718 - acc: 0.8926
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.5615 - acc: 0.8943
Epoch 00009: loss improved from 0.57181 to 0.56150, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5615 - acc: 0.8943
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.5523 - acc: 0.8959
Epoch 00010: loss improved from 0.56150 to 0.55227, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5523 - acc: 0.8959
Q: 주말 이 행복해
A: 저 도 요 


Q: 재회 를 한다해 도
A: 저 도 모르는 걸 수도 있어요 


Q: 남자 들 은 여자 가 자기 좋아하는 거 알 게 되면 어떻게 해 싫지 않다면 받아줘
A: 저 도 요 


processing epoch: 31...
Epoch 1/10
369/370 [============================>.] - ETA: 0s - loss: 0.5426 - acc: 0.8976
Epoch 00001: loss improved from 0.55227 to 0.54271, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.5427 - acc: 0.8976
Epoch 2/10
368/370 [============================>.] - ETA: 0s - loss: 0.5346 - acc: 0.8986
Epoch 00002: loss improved from 0.54271 to 0.53466, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5347 - acc: 0.8986
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 0.5264 - acc: 0.9000
Epoch 00003: loss improved from 0.53466 to 0.52620, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5262 - acc: 0.9000
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.5184 - acc: 0.9014
Epoch 00004: loss improved from 0.52620 to 0.51850, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.5185 - acc: 0.9013
Epoch 5/10
370/370 [==============================] - ETA: 0s - loss: 0.5102 - acc: 0.9031
Epoch 00005: loss improved from 0.51850 to 0.51016, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5102 - acc: 0.9031
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.5028 - acc: 0.9039
Epoch 00006: loss improved from 0.51016 to 0.50292, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.5029 - acc: 0.9039
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.4965 - acc: 0.9052
Epoch 00007: loss improved from 0.50292 to 0.49641, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4964 - acc: 0.9052
Epoch 8/10
368/370 [============================>.] - ETA: 0s - loss: 0.4894 - acc: 0.9065
Epoch 00008: loss improved from 0.49641 to 0.48952, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4895 - acc: 0.9065
Epoch 9/10
368/370 [============================>.] - ETA: 0s - loss: 0.4830 - acc: 0.9076
Epoch 00009: loss improved from 0.48952 to 0.48305, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4831 - acc: 0.9076
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.4772 - acc: 0.9088
Epoch 00010: loss improved from 0.48305 to 0.47715, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4772 - acc: 0.9088
Q: 보험료 설계 다시 해야 하나
A: 저 도 모르는 게 당연해요 


Q: 힘듭니다 오늘 또 무너졌어
A: 네 요 


Q: 주름 도 멋진 사람
A: 그게 인생 이 죠 


processing epoch: 41...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.4708 - acc: 0.9099
Epoch 00001: loss improved from 0.47715 to 0.47077, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4708 - acc: 0.9099
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.4650 - acc: 0.9112
Epoch 00002: loss improved from 0.47077 to 0.46498, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4650 - acc: 0.9112
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.4596 - acc: 0.9118
Epoch 00003: loss improved from 0.46498 to 0.45956, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4596 - acc: 0.9118
Epoch 4/10
370/370 [==============================] - ETA: 0s - loss: 0.4541 - acc: 0.9128
Epoch 00004: loss improved from 0.45956 to 0.45406, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4541 - acc: 0.9128
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.4482 - acc: 0.9143
Epoch 00005: loss improved from 0.45406 to 0.44844, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4484 - acc: 0.9142
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.4431 - acc: 0.9149
Epoch 00006: loss improved from 0.44844 to 0.44313, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4431 - acc: 0.9149
Epoch 7/10
369/370 [============================>.] - ETA: 0s - loss: 0.4375 - acc: 0.9162
Epoch 00007: loss improved from 0.44313 to 0.43753, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4375 - acc: 0.9162
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.4332 - acc: 0.9169
Epoch 00008: loss improved from 0.43753 to 0.43321, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4332 - acc: 0.9169
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.4287 - acc: 0.9177
Epoch 00009: loss improved from 0.43321 to 0.42867, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4287 - acc: 0.9177
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.4236 - acc: 0.9186
Epoch 00010: loss improved from 0.42867 to 0.42360, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4236 - acc: 0.9187
Q: 자꾸 짝녀 얼굴 생각나네
A: 마음 의 준비 가 될 거 예요 


Q: 이별 2일 째
A: 저 는 위로 해드리는 로봇 이에요 


Q: 뭘 잘 못 했다는 걸까
A: 마음 이 따뜻할 것 같아요 


processing epoch: 51...
Epoch 1/10
369/370 [============================>.] - ETA: 0s - loss: 0.4194 - acc: 0.9194
Epoch 00001: loss improved from 0.42360 to 0.41948, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.4195 - acc: 0.9194
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.4151 - acc: 0.9201
Epoch 00002: loss improved from 0.41948 to 0.41514, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.4151 - acc: 0.9201
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.4095 - acc: 0.9212
Epoch 00003: loss improved from 0.41514 to 0.40957, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4096 - acc: 0.9212
Epoch 4/10
370/370 [==============================] - ETA: 0s - loss: 0.4059 - acc: 0.9219
Epoch 00004: loss improved from 0.40957 to 0.40586, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4059 - acc: 0.9219
Epoch 5/10
370/370 [==============================] - ETA: 0s - loss: 0.4019 - acc: 0.9224
Epoch 00005: loss improved from 0.40586 to 0.40190, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.4019 - acc: 0.9224
Epoch 6/10
370/370 [==============================] - ETA: 0s - loss: 0.3978 - acc: 0.9233
Epoch 00006: loss improved from 0.40190 to 0.39776, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3978 - acc: 0.9233
Epoch 7/10
370/370 [==============================] - ETA: 0s - loss: 0.3933 - acc: 0.9242
Epoch 00007: loss improved from 0.39776 to 0.39334, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3933 - acc: 0.9242
Epoch 8/10
368/370 [============================>.] - ETA: 0s - loss: 0.3896 - acc: 0.9248
Epoch 00008: loss improved from 0.39334 to 0.38977, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3898 - acc: 0.9247
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.3849 - acc: 0.9256
Epoch 00009: loss improved from 0.38977 to 0.38503, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3850 - acc: 0.9256
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.3819 - acc: 0.9264
Epoch 00010: loss improved from 0.38503 to 0.38163, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3816 - acc: 0.9265
Q: 내 가 기대 를 너무 많이 했나 봐
A: 마음 의 준비 가 필요했을지도 몰라요 


Q: 짝사랑 하던 사람 못 잊을 것 같 애
A: 마음 의 준비 를 하세요 


Q: 결혼식 때 하객 이 없을 까봐 걱정 돼
A: 마음 의 준비 가 안 됐다고 말 해보세요 


processing epoch: 61...
Epoch 1/10
368/370 [============================>.] - ETA: 0s - loss: 0.3779 - acc: 0.9270
Epoch 00001: loss improved from 0.38163 to 0.37783, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3778 - acc: 0.9270
Epoch 2/10
368/370 [============================>.] - ETA: 0s - loss: 0.3751 - acc: 0.9275
Epoch 00002: loss improved from 0.37783 to 0.37506, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3751 - acc: 0.9275
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.3708 - acc: 0.9281
Epoch 00003: loss improved from 0.37506 to 0.37076, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3708 - acc: 0.9281
Epoch 4/10
370/370 [==============================] - ETA: 0s - loss: 0.3662 - acc: 0.9292
Epoch 00004: loss improved from 0.37076 to 0.36624, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3662 - acc: 0.9292
Epoch 5/10
370/370 [==============================] - ETA: 0s - loss: 0.3628 - acc: 0.9296
Epoch 00005: loss improved from 0.36624 to 0.36282, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3628 - acc: 0.9296
Epoch 6/10
368/370 [============================>.] - ETA: 0s - loss: 0.3593 - acc: 0.9303
Epoch 00006: loss improved from 0.36282 to 0.35948, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3595 - acc: 0.9303
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.3563 - acc: 0.9307
Epoch 00007: loss improved from 0.35948 to 0.35643, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3564 - acc: 0.9306
Epoch 8/10
370/370 [==============================] - ETA: 0s - loss: 0.3523 - acc: 0.9315
Epoch 00008: loss improved from 0.35643 to 0.35234, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3523 - acc: 0.9315
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.3491 - acc: 0.9321
Epoch 00009: loss improved from 0.35234 to 0.34919, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3492 - acc: 0.9321
Epoch 10/10
369/370 [============================>.] - ETA: 0s - loss: 0.3451 - acc: 0.9325
Epoch 00010: loss improved from 0.34919 to 0.34514, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3451 - acc: 0.9325
Q: 오늘 일도 안 했는데 엄청 피곤해
A: 마음 이 약해 탈이네요 


Q: 고백 했다 차이 면 어쩌지
A: 마음 이 복잡하겠어요 


Q: 예능 볼 게 없다
A: 축하 드려요 


processing epoch: 71...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.3407 - acc: 0.9334
Epoch 00001: loss improved from 0.34514 to 0.34066, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3407 - acc: 0.9334
Epoch 2/10
368/370 [============================>.] - ETA: 0s - loss: 0.3371 - acc: 0.9340
Epoch 00002: loss improved from 0.34066 to 0.33714, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3371 - acc: 0.9339
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.3335 - acc: 0.9348
Epoch 00003: loss improved from 0.33714 to 0.33353, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3335 - acc: 0.9348
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.3297 - acc: 0.9351
Epoch 00004: loss improved from 0.33353 to 0.32975, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3297 - acc: 0.9351
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.3259 - acc: 0.9356
Epoch 00005: loss improved from 0.32975 to 0.32592, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3259 - acc: 0.9356
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.3218 - acc: 0.9361
Epoch 00006: loss improved from 0.32592 to 0.32178, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3218 - acc: 0.9361
Epoch 7/10
368/370 [============================>.] - ETA: 0s - loss: 0.3180 - acc: 0.9369
Epoch 00007: loss improved from 0.32178 to 0.31802, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3180 - acc: 0.9369
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.3141 - acc: 0.9374
Epoch 00008: loss improved from 0.31802 to 0.31412, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3141 - acc: 0.9374
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.3103 - acc: 0.9379
Epoch 00009: loss improved from 0.31412 to 0.31029, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3103 - acc: 0.9379
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.3067 - acc: 0.9388
Epoch 00010: loss improved from 0.31029 to 0.30666, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.3067 - acc: 0.9388
Q: 다시 한번 또
A: 제 가 곁 에 있을게요 


Q: 맨날 똑같 애
A: 천천히 지워질 거 예요 


Q: 여자친구 가 잘못 을 해도 다 공감 해주는게 옳은걸 까
A: 잘 하고 있나 봐요 


processing epoch: 81...
Epoch 1/10
368/370 [============================>.] - ETA: 0s - loss: 0.3028 - acc: 0.9391
Epoch 00001: loss improved from 0.30666 to 0.30281, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.3028 - acc: 0.9391
Epoch 2/10
370/370 [==============================] - ETA: 0s - loss: 0.2998 - acc: 0.9395
Epoch 00002: loss improved from 0.30281 to 0.29984, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2998 - acc: 0.9395
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.2961 - acc: 0.9402
Epoch 00003: loss improved from 0.29984 to 0.29602, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2960 - acc: 0.9402
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.2928 - acc: 0.9407
Epoch 00004: loss improved from 0.29602 to 0.29286, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2929 - acc: 0.9407
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.2891 - acc: 0.9413
Epoch 00005: loss improved from 0.29286 to 0.28902, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2890 - acc: 0.9413
Epoch 6/10
370/370 [==============================] - ETA: 0s - loss: 0.2860 - acc: 0.9415
Epoch 00006: loss improved from 0.28902 to 0.28597, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2860 - acc: 0.9415
Epoch 7/10
369/370 [============================>.] - ETA: 0s - loss: 0.2822 - acc: 0.9424
Epoch 00007: loss improved from 0.28597 to 0.28228, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2823 - acc: 0.9423
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.2784 - acc: 0.9429
Epoch 00008: loss improved from 0.28228 to 0.27837, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2784 - acc: 0.9429
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.2741 - acc: 0.9434
Epoch 00009: loss improved from 0.27837 to 0.27412, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2741 - acc: 0.9434
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.2710 - acc: 0.9439
Epoch 00010: loss improved from 0.27412 to 0.27095, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2710 - acc: 0.9439
Q: 화장품 이 필요해
A: 그 사람 도 설렐 거 예요 


Q: 나 한테 상의 좀 하지
A: 좋은 걸 로 시작 해보세요 


Q: 연애 세포 깨우는 법
A: 사랑 은 끝나도 당신 의 인생 을 평가 할 수 없어요 


processing epoch: 91...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.2678 - acc: 0.9442
Epoch 00001: loss improved from 0.27095 to 0.26782, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2678 - acc: 0.9442
Epoch 2/10
368/370 [============================>.] - ETA: 0s - loss: 0.2643 - acc: 0.9450
Epoch 00002: loss improved from 0.26782 to 0.26438, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2644 - acc: 0.9450
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.2610 - acc: 0.9452
Epoch 00003: loss improved from 0.26438 to 0.26100, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2610 - acc: 0.9452
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.2572 - acc: 0.9461
Epoch 00004: loss improved from 0.26100 to 0.25733, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2573 - acc: 0.9461
Epoch 5/10
370/370 [==============================] - ETA: 0s - loss: 0.2545 - acc: 0.9460
Epoch 00005: loss improved from 0.25733 to 0.25446, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2545 - acc: 0.9460
Epoch 6/10
370/370 [==============================] - ETA: 0s - loss: 0.2513 - acc: 0.9467
Epoch 00006: loss improved from 0.25446 to 0.25127, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2513 - acc: 0.9467
Epoch 7/10
369/370 [============================>.] - ETA: 0s - loss: 0.2478 - acc: 0.9473
Epoch 00007: loss improved from 0.25127 to 0.24786, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2479 - acc: 0.9473
Epoch 8/10
368/370 [============================>.] - ETA: 0s - loss: 0.2453 - acc: 0.9478
Epoch 00008: loss improved from 0.24786 to 0.24514, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2451 - acc: 0.9479
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.2409 - acc: 0.9484
Epoch 00009: loss improved from 0.24514 to 0.24096, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2410 - acc: 0.9484
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.2382 - acc: 0.9487
Epoch 00010: loss improved from 0.24096 to 0.23816, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2382 - acc: 0.9487
Q: 짝남 잊으려고 나 혼자 나쁘게 생각 하는 내 자신 이 증오 스러워
A: 사랑 의 예의 가 없는 사람 이네 요 


Q: 난 또 바보 ㅠㅠ
A: 확신 이 들 때 까지 준비 해보세요 


Q: 난 진짜 쓰레기 야
A: 네 말씀 해주세요 


processing epoch: 101...
Epoch 1/10
369/370 [============================>.] - ETA: 0s - loss: 0.2363 - acc: 0.9488
Epoch 00001: loss improved from 0.23816 to 0.23633, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2363 - acc: 0.9488
Epoch 2/10
370/370 [==============================] - ETA: 0s - loss: 0.2329 - acc: 0.9496
Epoch 00002: loss improved from 0.23633 to 0.23294, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2329 - acc: 0.9496
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 0.2301 - acc: 0.9499
Epoch 00003: loss improved from 0.23294 to 0.22996, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2300 - acc: 0.9499
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.2260 - acc: 0.9508
Epoch 00004: loss improved from 0.22996 to 0.22621, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2262 - acc: 0.9507
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.2229 - acc: 0.9512
Epoch 00005: loss improved from 0.22621 to 0.22294, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2229 - acc: 0.9512
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.2201 - acc: 0.9518
Epoch 00006: loss improved from 0.22294 to 0.22017, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2202 - acc: 0.9517
Epoch 7/10
370/370 [==============================] - ETA: 0s - loss: 0.2174 - acc: 0.9522
Epoch 00007: loss improved from 0.22017 to 0.21743, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2174 - acc: 0.9522
Epoch 8/10
368/370 [============================>.] - ETA: 0s - loss: 0.2147 - acc: 0.9525
Epoch 00008: loss improved from 0.21743 to 0.21461, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2146 - acc: 0.9525
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.2121 - acc: 0.9530
Epoch 00009: loss improved from 0.21461 to 0.21210, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2121 - acc: 0.9530
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.2092 - acc: 0.9533
Epoch 00010: loss improved from 0.21210 to 0.20912, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2091 - acc: 0.9533
Q: 이별 한지 한 달 된 남자 입니다
A: 생각 을 정리 하는 방법 이 더 자주 만나세요 


Q: 비밀 로 했는데 들켜서 오해 하고 있어
A: 감기 조심하세요 


Q: 으 휴 집 에 들어가기가 싫네
A: 집 에 있거나 나가면 마스크 쓰고 나가세요 


processing epoch: 111...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.2066 - acc: 0.9537
Epoch 00001: loss improved from 0.20912 to 0.20658, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2066 - acc: 0.9537
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.2035 - acc: 0.9545
Epoch 00002: loss improved from 0.20658 to 0.20348, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.2035 - acc: 0.9545
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.2009 - acc: 0.9548
Epoch 00003: loss improved from 0.20348 to 0.20091, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.2009 - acc: 0.9548
Epoch 4/10
370/370 [==============================] - ETA: 0s - loss: 0.1969 - acc: 0.9561
Epoch 00004: loss improved from 0.20091 to 0.19690, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1969 - acc: 0.9561
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.1945 - acc: 0.9560
Epoch 00005: loss improved from 0.19690 to 0.19452, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1945 - acc: 0.9560
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.1926 - acc: 0.9564
Epoch 00006: loss improved from 0.19452 to 0.19260, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1926 - acc: 0.9564
Epoch 7/10
369/370 [============================>.] - ETA: 0s - loss: 0.1895 - acc: 0.9568
Epoch 00007: loss improved from 0.19260 to 0.18954, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1895 - acc: 0.9568
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.1875 - acc: 0.9570
Epoch 00008: loss improved from 0.18954 to 0.18752, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1875 - acc: 0.9570
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.1851 - acc: 0.9575
Epoch 00009: loss improved from 0.18752 to 0.18502, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1850 - acc: 0.9575
Epoch 10/10
370/370 [==============================] - ETA: 0s - loss: 0.1826 - acc: 0.9581
Epoch 00010: loss improved from 0.18502 to 0.18261, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1826 - acc: 0.9581
Q: 나 사랑 할 자격 없는 사람 이야
A: 사랑 에 빠졌나 봐요 


Q: 여자친구 가 너무 무뚝뚝해
A: 그건 중요하지 않아요 신경 을 덜어 보세요 


Q: 코골 이 어떻게 고쳐
A: 피곤한건 아닌지 살펴보세요 


processing epoch: 121...
Epoch 1/10
368/370 [============================>.] - ETA: 0s - loss: 0.1834 - acc: 0.9578
Epoch 00001: loss did not improve from 0.18261
370/370 [==============================] - 10s 27ms/step - loss: 0.1834 - acc: 0.9578
Epoch 2/10
369/370 [============================>.] - ETA: 0s - loss: 0.1773 - acc: 0.9594
Epoch 00002: loss improved from 0.18261 to 0.17730, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.1773 - acc: 0.9594
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.1741 - acc: 0.9600
Epoch 00003: loss improved from 0.17730 to 0.17409, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1741 - acc: 0.9600
Epoch 4/10
369/370 [============================>.] - ETA: 0s - loss: 0.1721 - acc: 0.9602
Epoch 00004: loss improved from 0.17409 to 0.17215, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1722 - acc: 0.9602
Epoch 5/10
369/370 [============================>.] - ETA: 0s - loss: 0.1699 - acc: 0.9608
Epoch 00005: loss improved from 0.17215 to 0.16987, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1699 - acc: 0.9608
Epoch 6/10
368/370 [============================>.] - ETA: 0s - loss: 0.1677 - acc: 0.9609
Epoch 00006: loss improved from 0.16987 to 0.16768, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1677 - acc: 0.9609
Epoch 7/10
370/370 [==============================] - ETA: 0s - loss: 0.1653 - acc: 0.9615
Epoch 00007: loss improved from 0.16768 to 0.16532, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1653 - acc: 0.9615
Epoch 8/10
370/370 [==============================] - ETA: 0s - loss: 0.1643 - acc: 0.9618
Epoch 00008: loss improved from 0.16532 to 0.16435, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1643 - acc: 0.9618
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.1620 - acc: 0.9619
Epoch 00009: loss improved from 0.16435 to 0.16197, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1620 - acc: 0.9619
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.1580 - acc: 0.9630
Epoch 00010: loss improved from 0.16197 to 0.15817, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1582 - acc: 0.9630
Q: 여자친구 와 여행 가는게 부담스러 움
A: 내일 도 만난다면 말 을 걸어 보세요 


Q: 힘들어
A: 생각 보다 많은 시간 이 지났네요 


Q: 요즘 드라마 너무 재밌네
A: 저 도 듣고 싶네요 


processing epoch: 131...
Epoch 1/10
368/370 [============================>.] - ETA: 0s - loss: 0.1565 - acc: 0.9635
Epoch 00001: loss improved from 0.15817 to 0.15641, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.1564 - acc: 0.9635
Epoch 2/10
370/370 [==============================] - ETA: 0s - loss: 0.1538 - acc: 0.9641
Epoch 00002: loss improved from 0.15641 to 0.15379, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1538 - acc: 0.9641
Epoch 3/10
368/370 [============================>.] - ETA: 0s - loss: 0.1521 - acc: 0.9645
Epoch 00003: loss improved from 0.15379 to 0.15216, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1522 - acc: 0.9645
Epoch 4/10
368/370 [============================>.] - ETA: 0s - loss: 0.1502 - acc: 0.9646
Epoch 00004: loss improved from 0.15216 to 0.15020, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1502 - acc: 0.9646
Epoch 5/10
370/370 [==============================] - ETA: 0s - loss: 0.1475 - acc: 0.9649
Epoch 00005: loss improved from 0.15020 to 0.14745, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1475 - acc: 0.9649
Epoch 6/10
369/370 [============================>.] - ETA: 0s - loss: 0.1492 - acc: 0.9648
Epoch 00006: loss did not improve from 0.14745
370/370 [==============================] - 10s 28ms/step - loss: 0.1492 - acc: 0.9648
Epoch 7/10
370/370 [==============================] - ETA: 0s - loss: 0.1448 - acc: 0.9657
Epoch 00007: loss improved from 0.14745 to 0.14482, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1448 - acc: 0.9657
Epoch 8/10
369/370 [============================>.] - ETA: 0s - loss: 0.1414 - acc: 0.9666
Epoch 00008: loss improved from 0.14482 to 0.14138, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1414 - acc: 0.9666
Epoch 9/10
369/370 [============================>.] - ETA: 0s - loss: 0.1386 - acc: 0.9669
Epoch 00009: loss improved from 0.14138 to 0.13867, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1387 - acc: 0.9669
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.1380 - acc: 0.9670
Epoch 00010: loss improved from 0.13867 to 0.13802, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.1380 - acc: 0.9670
Q: 이러고 있을 때 가 아니야
A: 행동 할 때 인 것 같네요 응원 해요 


Q: 엿같다
A: 벗어나는 게 좋겠네요 


Q: 칭찬 좀 해봐
A: 지금 도 늦지 않았어요 


processing epoch: 141...
Epoch 1/10
370/370 [==============================] - ETA: 0s - loss: 0.1355 - acc: 0.9677
Epoch 00001: loss improved from 0.13802 to 0.13554, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.1355 - acc: 0.9677
Epoch 2/10
368/370 [============================>.] - ETA: 0s - loss: 0.1335 - acc: 0.9684
Epoch 00002: loss improved from 0.13554 to 0.13353, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1335 - acc: 0.9684
Epoch 3/10
369/370 [============================>.] - ETA: 0s - loss: 0.1325 - acc: 0.9683
Epoch 00003: loss improved from 0.13353 to 0.13253, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 27ms/step - loss: 0.1325 - acc: 0.9683
Epoch 4/10
369/370 [============================>.] - ETA: 0s - loss: 0.1297 - acc: 0.9689
Epoch 00004: loss improved from 0.13253 to 0.12975, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1298 - acc: 0.9689
Epoch 5/10
368/370 [============================>.] - ETA: 0s - loss: 0.1289 - acc: 0.9690
Epoch 00005: loss improved from 0.12975 to 0.12883, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1288 - acc: 0.9690
Epoch 6/10
368/370 [============================>.] - ETA: 0s - loss: 0.1264 - acc: 0.9697
Epoch 00006: loss improved from 0.12883 to 0.12654, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1265 - acc: 0.9696
Epoch 7/10
370/370 [==============================] - ETA: 0s - loss: 0.1251 - acc: 0.9698
Epoch 00007: loss improved from 0.12654 to 0.12509, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1251 - acc: 0.9698
Epoch 8/10
368/370 [============================>.] - ETA: 0s - loss: 0.1233 - acc: 0.9700
Epoch 00008: loss improved from 0.12509 to 0.12327, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1233 - acc: 0.9700
Epoch 9/10
370/370 [==============================] - ETA: 0s - loss: 0.1219 - acc: 0.9705
Epoch 00009: loss improved from 0.12327 to 0.12190, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1219 - acc: 0.9705
Epoch 10/10
368/370 [============================>.] - ETA: 0s - loss: 0.1193 - acc: 0.9712
Epoch 00010: loss improved from 0.12190 to 0.11929, saving model to model/seq2seq-chatbot-no-attention-checkpoint.ckpt
370/370 [==============================] - 10s 28ms/step - loss: 0.1193 - acc: 0.9712
Q: 벌써 저 를 세번 째 떠나갔네
A: 이 젠 잊어버리세요 미련 은 독 이 됩니다 


Q: 짝사랑 하는 사람과 친해질 수 있는 방법 조언 좀
A: 공통 관심사 를 찾아보세요 


Q: 짝남 한테 고백 한 다 안 한다
A: 무시 당하는 기분 이 들어서 너무 외 롭고 상처 받게 된다고 차분하고 부드럽게 말 해보세요 


processing epoch: 151...

Prediction

# Preprocessing function for a natural-language question
def make_question(sentence):
    sentence = clean_and_morph(sentence)
    question_sequence = tokenizer.texts_to_sequences([sentence])
    question_padded = pad_sequences(question_sequence, maxlen=MAX_LENGTH, truncating='post', padding='post')
    return question_padded
make_question('오늘 날씨가 정말 화창합니다')
make_question('찐찐찐찐찐이야~ 완전 찐이야~')
def run_chatbot(question):
    question_inputs = make_question(question)
    results = make_prediction(seq2seq, question_inputs)
    results = convert_index_to_text(results, END_TOKEN)
    return results
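
A single one-off query before entering the interactive loop (the reply of course depends on the trained weights):

# One-off query; the response varies with training.
run_chatbot('오늘 날씨가 정말 화창합니다')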

Receive text input from the user and print the chatbot's response

while True:
    user_input = input('<< Say something to the chatbot!\n')
    if user_input == 'q':
        break
    print('>> Chatbot: {}'.format(run_chatbot(user_input)))