import os
import random
import fnmatch
import datetime
import pickle
import numpy as np
np.set_printoptions(formatter={'float_kind': lambda x: "%.4f" % x})
import pandas as pd
pd.set_option('display.width', 300)
pd.set_option('display.float_format', '{:,.4f}'.format)
pd.set_option('display.max_colwidth', 200)
import tensorflow as tf
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
print(f'tf.__version__: {tf.__version__}')
print(f'keras.__version__: {tensorflow.keras.__version__}')
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import cv2
from imgaug import augmenters as img_aug
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
from PIL import Image
data_dir = '/content/video'
file_list = os.listdir(data_dir)
image_paths=[]
steering_angles = []
pattern = "*.png"
for filename in file_list:
    if fnmatch.fnmatch(filename, pattern):
        image_paths.append(os.path.join(data_dir, filename))
        angle = int(filename[-7:-4])  # steering angle is encoded in the last 3 digits before ".png"
        steering_angles.append(angle)
image_index = 20
plt.imshow(Image.open(image_paths[image_index]))
print("image_path: %s" % image_paths[image_index])
print("steering_Angle: %d" % steering_angles[image_index])
df = pd.DataFrame()
df['ImagePath'] = image_paths
df['Angle'] = steering_angles
num_of_bins = 25
hist, bins = np.histogram(df['Angle'], num_of_bins)
fig, axes = plt.subplots(1,1, figsize=(12,4))
axes.hist(df['Angle'], bins=num_of_bins, width=1, color='blue')
X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steering_angles, test_size=0.2 )
print("Training data: %d\nValidation data: %d" % (len(X_train), len(X_valid)))
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
axes[0].hist(y_train, bins=num_of_bins, width=1, color='blue')
axes[0].set_title('Training Data')
axes[1].hist(y_valid, bins=num_of_bins, width=1, color='red')
axes[1].set_title('Validation Data')
def my_imread(image_path):
    image = cv2.imread(image_path)
    return image
def img_preprocess(image):
    # resize to the 200x66 input the NVIDIA model below expects (no-op if the frames already match)
    image = cv2.resize(image, (200, 66))
    image = image / 255
    return image
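# The imgaug import above is otherwise unused. Below is a minimal augmentation sketch (an
# assumption, not part of the original pipeline) that could be applied to each raw image
# inside the generator, before img_preprocess. The horizontal-flip mapping assumes steering
# angles lie in [0, 180] with 90 meaning straight ahead.
def augment_image(image, steering_angle):
    if random.random() < 0.5:
        # random Gaussian blur
        image = img_aug.GaussianBlur(sigma=(0.0, 1.0)).augment_image(image)
    if random.random() < 0.5:
        # random brightness change
        image = img_aug.Multiply((0.7, 1.3)).augment_image(image)
    if random.random() < 0.5:
        # mirror the image and the steering angle together
        image = cv2.flip(image, 1)
        steering_angle = 180 - steering_angle
    return image, steering_angle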
fig, axes = plt.subplots(1,2, figsize=(15,10))
image_orig = my_imread(image_paths[image_index])
image_processed = img_preprocess(image_orig)
axes[0].imshow(image_orig)
axes[0].set_title("orig")
axes[1].imshow(image_processed)
axes[1].set_title("processed")
def nvidia_model():
    model = Sequential(name='Nvidia_Model')
    model.add(Conv2D(24, (5, 5), strides=(2, 2), input_shape=(66, 200, 3), activation='elu'))
    model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='elu'))
    model.add(Dropout(0.2))
    model.add(Conv2D(64, (3, 3), activation='elu'))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='elu'))
    model.add(Dense(50, activation='elu'))
    model.add(Dense(10, activation='elu'))
    model.add(Dense(1))
    optimizer = Adam(learning_rate=1e-3)  # 'lr' is deprecated in recent Keras versions
    model.compile(loss='mse', optimizer=optimizer)
    return model
model = nvidia_model()
print(model.summary())
def image_data_generator(image_paths, steering_angles, batch_size):
    while True:
        batch_images = []
        batch_steering_angles = []
        for i in range(batch_size):
            random_index = random.randint(0, len(image_paths) - 1)
            image_path = image_paths[random_index]
            image = my_imread(image_path)
            steering_angle = steering_angles[random_index]
            image = img_preprocess(image)
            batch_images.append(image)
            batch_steering_angles.append(steering_angle)
        yield (np.asarray(batch_images), np.asarray(batch_steering_angles))
ncol=2
nrow=2
X_train_batch, y_train_batch= next(image_data_generator(X_train, y_train, nrow))
X_valid_batch, y_valid_batch= next(image_data_generator(X_valid, y_valid, nrow))
fig, axes=plt.subplots(nrow, ncol, figsize=(15,6))
fig.tight_layout()
for i in range(nrow):
    axes[i][0].imshow(X_train_batch[i])
    axes[i][0].set_title("training, angle=%s" % y_train_batch[i])
    axes[i][1].imshow(X_valid_batch[i])
    axes[i][1].set_title("validation, angle=%s" % y_valid_batch[i])
model_output_dir = "/content/drive/MyDrive/ColabNotebooks/인공지능자동차데이터"
checkpoint_callback = tensorflow.keras.callbacks.ModelCheckpoint(
    filepath=os.path.join(model_output_dir, 'lane_navigation_check.h5'),
    verbose=1, save_best_only=True)
history = model.fit(
    image_data_generator(X_train, y_train, batch_size=100),
    steps_per_epoch=300,
    epochs=10,
    validation_data=image_data_generator(X_valid, y_valid, batch_size=100),
    validation_steps=200,
    verbose=1,
    shuffle=1,  # ignored when training from a generator
    callbacks=[checkpoint_callback])
model.save(os.path.join(model_output_dir, 'lane_navigation_final.h5'))
history_path = os.path.join(model_output_dir, 'history.pickle')
with open(history_path, 'wb') as f:
    pickle.dump(history.history, f, pickle.HIGHEST_PROTOCOL)
# Code for training the model on the data
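# Follow-up sketch (not in the original notebook): reload the saved artifacts and inspect the
# training curves plus one prediction. It reuses model_output_dir from above and assumes the
# .h5 and pickle files were written by the training run in this notebook.
saved_model = load_model(os.path.join(model_output_dir, 'lane_navigation_final.h5'))

with open(os.path.join(model_output_dir, 'history.pickle'), 'rb') as f:
    saved_history = pickle.load(f)

# training vs. validation loss per epoch
plt.plot(saved_history['loss'], label='loss')
plt.plot(saved_history['val_loss'], label='val_loss')
plt.legend()
plt.title('Training history')

# predict a steering angle for one validation image
test_image = img_preprocess(my_imread(X_valid[0]))
predicted_angle = saved_model.predict(np.asarray([test_image]))[0][0]
print('true angle: %d, predicted angle: %.1f' % (y_valid[0], predicted_angle))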