import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# Data directory layout: one subdirectory per class under each split.
train_dir, validation_dir, test_dir = "train", "val", "test"
# Training-time data augmentation: random geometric and photometric
# perturbations applied on the fly to each training image.
_augment_options = dict(
    rescale=1./255,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,           # also flip vertically
    brightness_range=[0.8, 1.2],  # random brightness adjustment
    fill_mode='nearest',
)
train_datagen = ImageDataGenerator(**_augment_options)

# Validation images are only rescaled to [0, 1] — no augmentation.
validation_datagen = ImageDataGenerator(rescale=1./255)
# Stream batched, labeled images from the directory trees. Both splits use
# the same target size, batch size, and binary labeling.
_flow_options = dict(
    target_size=(150, 150),
    batch_size=32,
    class_mode='binary',
)
train_generator = train_datagen.flow_from_directory(train_dir, **_flow_options)
validation_generator = validation_datagen.flow_from_directory(
    validation_dir, **_flow_options
)
# CNN binary classifier: three Conv -> BatchNorm -> MaxPool -> Dropout
# stages (32, 64, 128 filters), then a 128-unit dense head with heavier
# dropout to curb overfitting, and a single sigmoid output.
model = keras.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Dropout(0.25))
# The second and third conv stages differ only in filter count.
for _filters in (64, 128):
    model.add(layers.Conv2D(_filters, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(2, 2))
    model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))  # stronger dropout in the dense head
model.add(layers.Dense(1, activation='sigmoid'))
# Compile for binary classification with Adam, tracking accuracy.
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Halve the learning rate whenever validation loss stops improving for
# 3 epochs, down to a floor of 1e-6.
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                              factor=0.5,
                                              patience=3,
                                              min_lr=1e-6)
# Train for 30 epochs with the LR-reduction callback attached.
history = model.fit(
    train_generator,
    validation_data=validation_generator,
    epochs=30,
    callbacks=[reduce_lr],
)

# Plot training curves: accuracy on the left panel, loss on the right.
plt.figure(figsize=(12, 4))
for _panel, _metric in enumerate(('accuracy', 'loss'), start=1):
    _name = _metric.capitalize()
    plt.subplot(1, 2, _panel)
    plt.plot(history.history[_metric], label='Train ' + _name)
    plt.plot(history.history['val_' + _metric], label='Validation ' + _name)
    plt.legend()
    plt.title(_name)
plt.show()
# NOTE(review): removed a stray base64-encoded copy of this same script that
# had been pasted here as raw text. It was not valid Python and made the
# whole file a syntax error.
# NOTE(review): removed a verbatim duplicate of the entire script above
# (same imports, directory setup, data generators, model definition,
# compile step, callbacks, training run, and plots). Executing it would
# rebuild the model and train it a second full 30 epochs from scratch,
# then overwrite `model`/`history` — clearly an accidental paste.