

CNN Practice with TensorFlow 2.x #1 (Character Analysis)

import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D
from tensorflow.keras.layers import Flatten, Dense, Dropout

# Load the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(-1, 28, 28, 1)  # reshape to a 4D tensor: (samples, height, width, channels)
x_test = x_test.reshape(-1, 28, 28, 1)

print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)

# Normalize pixel values to the [0, 1] range by dividing by 255.0
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
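
To confirm the preprocessing worked as intended, a quick sanity check is useful. This is a minimal sketch (the print statements are only illustrative); after dividing by 255.0 the pixel values should lie in [0, 1] and the arrays should be float32.

print(x_train.dtype, x_train.min(), x_train.max())   # expect: float32 0.0 1.0
print(x_test.dtype, x_test.min(), x_test.max())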

 

# Build the CNN model
cnn = Sequential()

cnn.add(Conv2D(input_shape=(28,28,1), kernel_size=(3,3), filters=32, activation='relu'))
cnn.add(Conv2D(kernel_size=(3,3), filters=64, activation='relu'))
cnn.add(MaxPool2D(pool_size=(2,2)))
cnn.add(Dropout(0.25))

cnn.add(Flatten())  # flatten the 3D feature maps into a 1D vector

cnn.add(Dense(128, activation='relu'))  # fully connected hidden layer
cnn.add(Dropout(0.5))
cnn.add(Dense(10, activation='softmax'))    # output layer: 10 digit classes
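
Before compiling, it can help to inspect the architecture with Keras's built-in summary. The shapes noted in the comments are what this particular layer stack should produce (each 3x3 valid convolution shrinks the spatial dimensions by 2, and 2x2 pooling halves them).

cnn.summary()   # per-layer output shapes and parameter counts:
                # Conv2D (None, 26, 26, 32) -> Conv2D (None, 24, 24, 64)
                # -> MaxPool2D (None, 12, 12, 64) -> Flatten (None, 9216)
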
# Compile and train the CNN model

cnn.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])

hist = cnn.fit(x_train, y_train, batch_size=128, epochs=30, validation_data=(x_test, y_test))
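
Thirty epochs is more than MNIST usually needs, and the validation loss can start creeping up while training accuracy keeps improving. As an optional variation (not part of the original run), an EarlyStopping callback can be passed to fit so training stops once val_loss stalls:

# Optional: stop early when val_loss has not improved for 3 epochs
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                              restore_best_weights=True)
# hist = cnn.fit(x_train, y_train, batch_size=128, epochs=30,
#                validation_data=(x_test, y_test), callbacks=[early_stop])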

cnn.evaluate(x_test, y_test)    # evaluate accuracy on the test set
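
Once trained, the model can be used for inference. The snippet below is a small sketch: predict returns a 10-way softmax probability vector per image, and np.argmax picks the most likely digit (the five-sample slice is arbitrary).

probs = cnn.predict(x_test[:5])          # shape (5, 10): class probabilities
pred_labels = np.argmax(probs, axis=1)   # most likely digit for each image
print(pred_labels, y_test[:5])           # predicted vs. true labels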

 

# Plot 1: accuracy over epochs
import matplotlib.pyplot as plt

plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Accuracy Trend')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='best')

plt.grid()
plt.show()

 

# Plot 2: loss over epochs
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss Trend')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='best')

plt.grid()
plt.show()
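
Finally, the trained model can be saved for later reuse with Keras's standard save/load API. The filename mnist_cnn.h5 below is just a placeholder, not something from the original post.

from tensorflow.keras.models import load_model

cnn.save('mnist_cnn.h5')             # placeholder filename; stores architecture + weights
restored = load_model('mnist_cnn.h5')
restored.evaluate(x_test, y_test)    # should reproduce the accuracy reported above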