# AI_making/sizer.py — image super-resolution training script
# (U-Net: 128x128 -> 256x256 upscaling; SRCNN: 256x256 sharpening)
#
# NOTE(review): the original export contained web-page navigation residue here
# ("Menu", "Manage", download links), which is not valid Python; replaced with
# this comment header so the file can actually be executed.
                        #%%
#사용할 라이브러리 로드
import tensorflow as tf
from tensorflow import keras
from keras.layers import Dense
from keras.models import Sequential

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from IPython.display import Image

import imageio
from skimage.transform import resize

from keras.layers import Dense, Input, Conv2D, Conv2DTranspose, Flatten, Reshape, MaxPool2D, Cropping2D
from keras.models import Model
from keras.layers import BatchNormalization, Dropout, Activation, concatenate

warnings.filterwarnings('ignore')
%matplotlib inline

SEED = 34

#%%
#GPU와 파일 로딩시 메모리 설정
# GPU 사용 설정
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        # GPU 메모리 사용 제한 설정 (필요에 따라 조절)
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)

#데이터 로드
class CombinedDataGenerator(keras.utils.Sequence):
    def __init__(self, images, batch_size=128, input_size=(128, 128), target_size=(256, 256)):
        self.images = images
        self.batch_size = batch_size
        self.input_size = input_size
        self.target_size = target_size

    def __len__(self):
        return int(np.ceil(len(self.images) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_images = self.images[idx * self.batch_size:(idx + 1) * self.batch_size]
        input_images = []
        target_images = []
        for img in batch_images:
            input_img = resize(img, self.input_size)
            input_images.append(input_img)
            target_images.append(img)
        return np.array(input_images), np.array(target_images)

#%%
# 파일 경로
file_path = r"C:\Users\remil\바탕 화면\productive\final.npz"

#로드
data = np.load(file_path)

#%%
#데이터 로드
train_generator = CombinedDataGenerator(data["train_images"], batch_size=128)
test_generator = CombinedDataGenerator(data["test_images"], batch_size=128)

#%%
#값 출력
tr_images, train = train_generator[0]
t_images, test = test_generator[0]
print(tr_images.min(), tr_images.max())
print(t_images.min(), t_images.max())

#%%
#데이터 유형 확인
print(tr_images.dtype, tr_images.shape)
print(t_images.dtype, t_images.shape)

#%%
#이미지 확인
plt.imshow(tr_images[0])

#%%
#이미지 확인2
plt.imshow(t_images[0])

#%%
#모델 작성
from keras.layers import Dense, Input, Conv2D, Conv2DTranspose, Flatten, Reshape
from keras.models import Model
from keras.layers import MaxPool2D, BatchNormalization, Dropout, Activation, concatenate

def conv2d_block(x, channel):
    x = Conv2D(channel, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(channel, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x

def unet_resoultion():
    inputs = Input((128, 128, 3))

    c1 = conv2d_block(inputs, 16)
    p1 = MaxPool2D(2)(c1)
    p1 = Dropout(0.1)(p1)

    c2 = conv2d_block(p1, 32)
    p2 = MaxPool2D(2)(c2)
    p2 = Dropout(0.1)(p2)

    c3 = conv2d_block(p2, 64)
    p3 = MaxPool2D(2)(c3)
    p3 = Dropout(0.1)(p3)

    c4 = conv2d_block(p3, 128)
    p4 = MaxPool2D(2)(c4)
    p4 = Dropout(0.1)(p4)

    c5 = conv2d_block(p4, 256)

    u6 = Conv2DTranspose(128, 2, 2)(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(0.1)(u6)
    c6 = conv2d_block(u6, 128)

    u7 = Conv2DTranspose(64, 2, 2, padding="valid")(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(0.1)(u7)
    c7 = conv2d_block(u7, 64)

    u8 = Conv2DTranspose(32, 2, 2, padding="valid")(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(0.1)(u8)
    c8 = conv2d_block(u8, 32)

    u9 = Conv2DTranspose(16, 2, 2, padding="valid")(c8)
    u9 = concatenate([u9, c1])
    u9 = Dropout(0.1)(u9)
    c9 = conv2d_block(u9, 16)

    u10 = Conv2DTranspose(16, 2, 2, padding="valid")(c9)
    outputs = Conv2D(3, 1, activation="sigmoid")(u10)
    model = Model(inputs, outputs)
    return model

#%%
#서머리 출력
model = unet_resoultion()
model.summary()

#%%
#로스 옵티마이저 매트릭 설정
model.compile(loss='mae', optimizer='adam', metrics=['accuracy'])

#%%
#모델 학습
hist = model.fit(train_generator, validation_data=(test_generator), epochs=25, verbose=1)

#%%
#진행 사항 출력
plt.plot(hist.history['accuracy'], label='accuracy')
plt.plot(hist.history['val_accuracy'], label='val_accuracy')
plt.plot(hist.history['loss'], label='loss')
plt.plot(hist.history['val_loss'], label='val_loss')
plt.legend(loc='upper left')
plt.show()

#%%
model.save('./sizer.h5')
#%%
tf.keras.models.save_model(model, 'sizer.keras')

#%%
harry = imageio.imread('./harry.jpeg')
original_shape = harry.shape

# 이미지 크기를 (128, 128, 3)으로 조정
harry_resized = resize(harry, (128, 128))

# 예측
test = model.predict(harry_resized[None, ...])

# 예측 결과를 원본 크기로 다시 조정
test_resized = resize(test[0], original_shape[:2])

# 잘라낸 이미지 표시
plt.imshow(test_resized)
plt.show()
#%%

#%%
#더 큰 이미지에서 사용 가능 방식으로 변경
class CombinedDataGenerator(keras.utils.Sequence):
    def __init__(self, images, batch_size=128, input_size=(256, 256), target_size=(256, 256)):
        self.images = images
        self.batch_size = batch_size
        self.input_size = input_size
        self.target_size = target_size

    def __len__(self):
        return int(np.ceil(len(self.images) / float(self.batch_size)))

    def __getitem__(self, idx):
        batch_images = self.images[idx * self.batch_size:(idx + 1) * self.batch_size]
        input_images = []
        target_images = []
        for img in batch_images:
            input_img = resize(img, (256, 256))  # 크기를 256x256으로 변경
            input_images.append(input_img)
            target_images.append(img)  # target_images는 원본 크기 유지
        return np.array(input_images), np.array(target_images)

#%%
#데이터셋 수정
train_generator = CombinedDataGenerator(data["train_images"], batch_size=128)
test_generator = CombinedDataGenerator(data["test_images"], batch_size=128)

#%%
from keras.layers import Average

def srcnn():
    inputs = Input((256, 256, 3))
    x = Conv2D(64, 9, activation = 'relu', padding="same")(inputs)

    x1 = Conv2D(32, 1, activation = 'relu', padding="same")(x)
    x2 = Conv2D(32, 3, activation = 'relu', padding="same")(x)
    x3 = Conv2D(32, 5, activation = 'relu', padding="same")(x)
    x = Average()([x1, x2, x3])

    outputs = Conv2D(3, 5, activation="relu", padding="same")(x)
    model = Model(inputs, outputs)
    model.compile(loss='mae', optimizer='adam', metrics=['accuracy'])
    return model

model2 = srcnn()
model2.summary()

#%%
hist2 = model2.fit(train_generator, validation_data=(test_generator), epochs=25, verbose=1)

#%%
model2.save('./sizer_1.h5')
#%%
tf.keras.models.save_model(model2, 'sizer_1.keras')









































# %%