파일 목록
-
📁 makenpz
-
📁 pip
-
📁 venv
- AIDB.png
- crossover_latent.npy
- requirements.txt
- test.py
- Title.png
#%%
# Load the libraries used throughout the notebook.
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras.models import Sequential
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from IPython.display import Image
import imageio
from skimage import color
from skimage.transform import resize
from keras.layers import Dense, Input, Conv2D, Conv2DTranspose, Flatten, Reshape, Cropping2D
from keras.models import Model
from keras.layers import BatchNormalization, Dropout, Activation, MaxPool2D, concatenate
warnings.filterwarnings('ignore')
%matplotlib inline
SEED = 34  # NOTE(review): defined but never used in this file — presumably intended for np/tf seeding; confirm
#%%
# GPU and memory configuration.
# Enable memory growth so TensorFlow allocates GPU memory on demand
# instead of reserving the whole device up front.
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        # Memory growth must be enabled before any GPU is initialized.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError if the GPUs were already initialized.
        print(e)
#%%
# Path to the preprocessed dataset (NumPy .npz archive).
# NOTE(review): absolute, machine-specific Windows path — will break elsewhere.
file_path = r"C:\Users\remil\바탕 화면\productive\final.npz"
# Lazily open the archive; keys read later: "train_images", "test_images".
data = np.load(file_path)
#%%
# Data loading via batch generators (currently disabled).
# NOTE(review): dead code parked in a string literal; CombinedDataGenerator is
# not defined anywhere in this file — remove or restore deliberately.
"""
train_generator = CombinedDataGenerator(data["train_images"], batch_size=128)
test_generator = CombinedDataGenerator(data["test_images"], batch_size=128)
"""
#%%
# Derive single-channel grayscale copies of both RGB splits.
# Each image becomes (256, 256, 1) so it matches the model's input shape.
train_images = data["train_images"]
test_images = data["test_images"]
train_gray = np.stack(
    [color.rgb2gray(frame).reshape(256, 256, 1) for frame in train_images]
)
test_gray = np.stack(
    [color.rgb2gray(frame).reshape(256, 256, 1) for frame in test_images]
)
#%%
# Sanity-check value ranges of every array (one min/max line per array).
for arr in (train_images, test_images, train_gray, test_gray):
    print(arr.min(), arr.max())
#%%
# Sanity-check dtypes and shapes of the same arrays, in the same order.
for arr in (train_images, test_images, train_gray, test_gray):
    print(arr.dtype, arr.shape)
#%%
# Visual sanity check: display the first RGB test image.
plt.imshow(test_images[0])
#%%
# Visual sanity check: display the first grayscale test image.
# Fix: test_gray[0] has shape (256, 256, 1); matplotlib's imshow rejects a
# trailing singleton channel axis, so drop it and render with a gray colormap.
plt.imshow(test_gray[0].squeeze(), cmap="gray")
#%%
# Model building blocks.
def conv2d_block(x, channel):
    """Apply two successive Conv2D(3x3, same) -> BatchNorm -> ReLU stages.

    Both convolutions use `channel` filters; spatial size is unchanged.
    """
    for _ in range(2):
        x = Conv2D(channel, 3, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    return x
def unet_color():
    """Build a U-Net mapping a (256, 256, 1) grayscale image to an RGB image.

    Encoder: four conv blocks (16 -> 128 filters), each followed by 2x2
    max-pooling and Dropout(0.1). Bottleneck: a 256-filter conv block.
    Decoder: transposed convolutions mirroring the encoder, each concatenated
    with the matching encoder skip, then Dropout and a conv block.
    Output: 1x1 conv with sigmoid producing 3 channels in [0, 1].
    """
    inputs = Input((256, 256, 1))

    # Encoder — keep every conv block's output for the skip connections.
    skips = []
    x = inputs
    for filters in (16, 32, 64, 128):
        block = conv2d_block(x, filters)
        skips.append(block)
        x = Dropout(0.1)(MaxPool2D(2)(block))

    # Bottleneck.
    x = conv2d_block(x, 256)

    # Decoder — upsample, merge the mirrored skip, regularize, convolve.
    for filters, skip in zip((128, 64, 32, 16), reversed(skips)):
        x = Conv2DTranspose(filters, 2, 2, padding="valid")(x)
        x = concatenate([x, skip])
        x = Dropout(0.1)(x)
        x = conv2d_block(x, filters)

    outputs = Conv2D(3, 1, activation="sigmoid")(x)
    return Model(inputs, outputs)
#%%
# Instantiate the U-Net and print its layer summary.
model = unet_color()
model.summary()
#%%
# Loss / optimizer / metric configuration.
# Fix: "accuracy" is meaningless for continuous per-pixel regression with MSE
# (it compares exact float equality of classes); track mean absolute error,
# which is interpretable for [0, 1] pixel values.
model.compile(loss="mse", optimizer="adam", metrics=["mae"])
#%%
# Train: grayscale inputs -> RGB targets, evaluated on the test split each epoch.
# NOTE(review): the test set doubles as the validation set — no truly held-out data.
hist = model.fit(train_gray, train_images, validation_data=(test_gray, test_images), epochs=50, verbose=1)
#%%
# Save in the legacy HDF5 format.
model.save('./color.h5')
#%%
# Save again in the native Keras format.
tf.keras.models.save_model(model, 'color.keras')
# %%
# Colorize an external test image with the trained model.
harry = imageio.imread('./harry.jpg')
original_shape = harry.shape  # keep the source resolution for reference
# skimage.transform.resize also rescales intensities to float in [0, 1].
harry_resized = resize(harry, (256, 256))
# Fix: the network expects a batched single-channel input of shape
# (1, 256, 256, 1), but the raw RGB image was fed in directly (and the 4-D
# prediction was passed to imshow). Convert to grayscale and add the
# batch/channel axes first.  Assumes harry.jpg is RGB — TODO confirm.
harry_gray = color.rgb2gray(harry_resized).reshape(1, 256, 256, 1)
# Model prediction: per-pixel RGB values in [0, 1], shape (1, 256, 256, 3).
test = model.predict(harry_gray)
# Display the first (only) item of the batch; imshow cannot take a 4-D array.
plt.imshow(test[0])
plt.show()