Download the simulator: https://github.com/udacity/self-driving-car-sim
Dataset: https://github.com/rslim087a/track
To test a trained model, first start the simulator, then run the driving code (a minimal drive script is sketched further below, after the model-saving step).

Code for loading the data and inspecting its distribution:
columns = ['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']
data = pd.read_csv(os.path.join(datadir, 'driving_log.csv'), names=columns)
pd.set_option('display.max_colwidth', -1)
data['center'] = data['center'].apply(path_leaf)
data['left'] = data['left'].apply(path_leaf)
data['right'] = data['right'].apply(path_leaf)
data.head()

num_bins = 25
samples_per_bin = 200
hist, bins = np.histogram(data['steering'], num_bins)
center = (bins[:-1] + bins[1:]) * 0.5
# print(bins)
# plt.bar(center, hist, width=0.05)
# plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
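The path_leaf helper used above (defined in the full listing at the end of the post) strips the absolute directory that the simulator records in driving_log.csv and keeps only the image file name:

def path_leaf(path):
    # ntpath splits Windows-style paths correctly even when run on Linux
    head, tail = ntpath.split(path)
    return tail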
Code for balancing the data (at most samples_per_bin samples are kept per steering bin, so the near-zero steering angles do not dominate training):

print('total_data:', len(data))
# print(data['steering'])
remove_list = []
for j in range(num_bins):
    list_ = []
    for i in range(len(data['steering'])):
        if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j + 1]:
            list_.append(i)
    # print(bins[j], bins[j+1])
    # print('the number of this:', len(list_))
    list_ = shuffle(list_)
    list_ = list_[samples_per_bin:]
    # print(list_)
    remove_list.extend(list_)
# print(len(remove_list))
# print(remove_list)
print('removed:', len(remove_list))
data.drop(data.index[remove_list], inplace=True)
print('remaining:', len(data))
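To confirm that the clipping worked, the steering histogram can be recomputed on the remaining rows (the full listing keeps an equivalent block commented out):

hist, _ = np.histogram(data['steering'], num_bins)
plt.bar(center, hist, width=0.05)
plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
plt.show()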
Code for splitting the dataset and preprocessing the images:

# Split into training and validation sets
image_paths, steerings = load_img_steering(datadir + '/IMG', data)
X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steerings, test_size=0.2, random_state=6)
print('Training Samples:{}\nValid Samples:{}'.format(len(X_train), len(X_valid)))

# Show the steering distribution of the training and validation sets
# fig, axes = plt.subplots(1, 2, figsize=(12, 4))
# axes[0].hist(y_train, bins=num_bins, width=0.05, color="blue")
# axes[0].set_title("Training set")
# axes[1].hist(y_valid, bins=num_bins, width=0.05, color="red")
# axes[1].set_title("Validation set")
# plt.show()

# Preprocess the images
# image = image_paths[100]
# original_image = mpimg.imread(image)
# preprocessed_image = img_preprocess(image)
X_train = np.array(list(map(img_preprocess, X_train)))
X_valid = np.array(list(map(img_preprocess, X_valid)))
# fig, axs = plt.subplots(1, 2, figsize=(15, 10))
# fig.tight_layout()
# axs[0].imshow(original_image)
# axs[0].set_title('Original Image')
# axs[1].imshow(preprocessed_image)
# axs[1].set_title('Preprocessed Image')
# plt.show()
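This snippet relies on two helpers defined in the full listing at the end of the post: load_img_steering, which walks the dataframe and collects the centre-camera image paths together with their steering angles, and img_preprocess, which prepares each image for the network. For reference:

def img_preprocess(img):
    img = mpimg.imread(img)                      # read the image from its file path
    img = img[60:135, :, :]                      # crop away sky and car bonnet
    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)   # YUV colour space, as in the NVIDIA pipeline
    img = cv2.GaussianBlur(img, (3, 3), 0)       # light blur to reduce noise
    img = cv2.resize(img, (200, 66))             # network input size (width 200, height 66)
    img = img / 255                              # scale pixel values to [0, 1]
    return img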
Network model code:

def nvidia_model():
    model = Sequential()
    model.add(Convolution2D(24, 5, 5, subsample=(2, 2), input_shape=(66, 200, 3), activation='relu'))
    model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))
    optimizer = Adam(lr=1e-3)
    model.compile(loss='mse', optimizer=optimizer)
    return model
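The layer calls above use the Keras 1-style arguments (Convolution2D(filters, rows, cols, subsample=...), Adam(lr=...)). On a current stack the same architecture would look roughly like the sketch below; this is not part of the original code and assumes TensorFlow 2.x's bundled tf.keras:

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam

def nvidia_model_keras2():
    # same NVIDIA architecture, written with the Keras 2 layer signatures
    model = Sequential([
        Conv2D(24, (5, 5), strides=(2, 2), activation='relu', input_shape=(66, 200, 3)),
        Conv2D(36, (5, 5), strides=(2, 2), activation='relu'),
        Conv2D(48, (5, 5), strides=(2, 2), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        Dropout(0.5),
        Flatten(),
        Dense(100),
        Dropout(0.5),
        Dense(50),
        Dense(10),
        Dense(1),
    ])
    model.compile(loss='mse', optimizer=Adam(learning_rate=1e-3))
    return model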
Training:

model = nvidia_model()
print(model.summary())
history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid),
                    batch_size=100, verbose=1, shuffle=True)
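Training for a fixed 30 epochs keeps whatever weights the last epoch produced. A common refinement (not in the original code) is to checkpoint the best validation loss and stop early; a sketch using standard Keras callbacks:

from keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    ModelCheckpoint('./models/model.h5', monitor='val_loss', save_best_only=True),
    EarlyStopping(monitor='val_loss', patience=5),
]
history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid),
                    batch_size=100, verbose=1, shuffle=True, callbacks=callbacks)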
Plot the learning curves:

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training', 'validation'])
plt.title('Loss')
plt.xlabel('Epoch')
plt.show()

Save the model:

model.save('./models/' + 'model.h5')
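To test the saved model, the simulator is started in autonomous mode and a drive script feeds it steering commands. The original post does not include this script; below is a minimal sketch along the lines of the standard Udacity/course drive.py, assuming python-socketio, eventlet, Flask and Pillow are installed. The event names ('telemetry', 'steer'), the telemetry fields and port 4567 follow that reference script and may need adjusting for your simulator version.

import base64
from io import BytesIO

import cv2
import eventlet
import numpy as np
import socketio
from flask import Flask
from keras.models import load_model
from PIL import Image

sio = socketio.Server()
app = Flask(__name__)
speed_limit = 10  # target speed used to derive a simple throttle value

def img_preprocess(img):
    # same preprocessing as at training time, minus the file read
    img = img[60:135, :, :]
    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    img = cv2.resize(img, (200, 66))
    return img / 255

@sio.on('telemetry')
def telemetry(sid, data):
    speed = float(data['speed'])
    image = np.asarray(Image.open(BytesIO(base64.b64decode(data['image']))))
    image = np.array([img_preprocess(image)])
    steering_angle = float(model.predict(image)[0][0])
    throttle = 1.0 - speed / speed_limit  # ease off as the car approaches the speed limit
    send_control(steering_angle, throttle)

@sio.on('connect')
def connect(sid, environ):
    send_control(0.0, 0.0)

def send_control(steering_angle, throttle):
    sio.emit('steer', data={'steering_angle': str(steering_angle), 'throttle': str(throttle)})

if __name__ == '__main__':
    model = load_model('./models/model.h5')
    app = socketio.WSGIApp(sio, app)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)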
With this dataset and the NVIDIA model, the car drives well on the training track and stays within the lane most of the time. Generalization is limited, however: when tested on a new track it tends to drift out of the lane and crash. None of these runs involved other vehicles in the scene, so future work could look at how to improve the generalization of the driving model; one common starting point, data augmentation, is sketched below.
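A typical first step (not in the original code) is to augment the training data on the fly: horizontally flip images while negating the steering angle, vary brightness, apply small shifts, or use the left/right camera images with a steering correction. A minimal sketch of a flip-based batch generator, assuming it is fed the path/steering arrays before img_preprocess is applied (i.e. preprocessing moves into the generator) and using the imports from the full listing:

def random_augment(image, steering):
    # horizontal flip with negated steering; brightness changes, shifts and
    # left/right-camera corrections can be added in the same style
    if np.random.rand() < 0.5:
        image = cv2.flip(image, 1)
        steering = -steering
    return image, steering

def batch_generator(image_paths, steerings, batch_size, training=True):
    while True:
        batch_img, batch_steering = [], []
        for _ in range(batch_size):
            idx = random.randint(0, len(image_paths) - 1)
            image = mpimg.imread(image_paths[idx])
            steering = steerings[idx]
            if training:
                image, steering = random_augment(image, steering)
            # same preprocessing as img_preprocess, minus the file read
            image = image[60:135, :, :]
            image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
            image = cv2.GaussianBlur(image, (3, 3), 0)
            image = cv2.resize(image, (200, 66)) / 255
            batch_img.append(image)
            batch_steering.append(steering)
        yield np.asarray(batch_img), np.asarray(batch_steering)

The model would then be trained from this generator (model.fit_generator on older Keras, or model.fit on newer versions that accept generators), which also avoids holding every preprocessed image in memory at once.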
Full code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Convolution2D
import cv2
import pandas as pd
import random
import os
import ntpath
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split


def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail


def load_img_steering(datadir, data):
    image_path = []
    steering = []
    for i in range(len(data)):
        indexed_data = data.iloc[i]  # pick out the row at this position
        center, left, right = indexed_data[0], indexed_data[1], indexed_data[2]
        image_path.append(os.path.join(datadir, center.strip()))
        steering.append(float(indexed_data[3]))
    image_paths = np.asarray(image_path)
    steerings = np.asarray(steering)
    return image_paths, steerings


def img_preprocess(img):
    img = mpimg.imread(img)
    img = img[60:135, :, :]
    img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    img = cv2.resize(img, (200, 66))
    img = img / 255
    return img


def nvidia_model():
    model = Sequential()
    model.add(Convolution2D(24, 5, 5, subsample=(2, 2), input_shape=(66, 200, 3), activation='relu'))
    model.add(Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Dropout(0.5))
    model.add(Dense(50))
    model.add(Dense(10))
    model.add(Dense(1))
    optimizer = Adam(lr=1e-3)
    model.compile(loss='mse', optimizer=optimizer)
    return model


if __name__ == '__main__':
    datadir = 'track'
    columns = ['center', 'left', 'right', 'steering', 'throttle', 'reverse', 'speed']
    data = pd.read_csv(os.path.join(datadir, 'driving_log.csv'), names=columns)
    pd.set_option('display.max_colwidth', -1)
    data['center'] = data['center'].apply(path_leaf)
    data['left'] = data['left'].apply(path_leaf)
    data['right'] = data['right'].apply(path_leaf)
    data.head()

    num_bins = 25
    samples_per_bin = 200
    hist, bins = np.histogram(data['steering'], num_bins)
    center = (bins[:-1] + bins[1:]) * 0.5
    # print(bins)
    # plt.bar(center, hist, width=0.05)
    # plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))

    print('total_data:', len(data))
    # print(data['steering'])
    remove_list = []
    for j in range(num_bins):
        list_ = []
        for i in range(len(data['steering'])):
            if data['steering'][i] >= bins[j] and data['steering'][i] <= bins[j + 1]:
                list_.append(i)
        # print(bins[j], bins[j+1])
        # print('the number of this:', len(list_))
        list_ = shuffle(list_)
        list_ = list_[samples_per_bin:]
        # print(list_)
        remove_list.extend(list_)
    # print(len(remove_list))
    # print(remove_list)
    print('removed:', len(remove_list))
    data.drop(data.index[remove_list], inplace=True)
    print('remaining:', len(data))

    # hist, _ = np.histogram(data['steering'], num_bins)
    # plt.bar(center, hist, width=0.05)
    # plt.plot((np.min(data['steering']), np.max(data['steering'])), (samples_per_bin, samples_per_bin))
    # plt.show()

    # Split into training and validation sets
    image_paths, steerings = load_img_steering(datadir + '/IMG', data)
    X_train, X_valid, y_train, y_valid = train_test_split(image_paths, steerings, test_size=0.2, random_state=6)
    print('Training Samples:{}\nValid Samples:{}'.format(len(X_train), len(X_valid)))

    # Show the steering distribution of the training and validation sets
    # fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    # axes[0].hist(y_train, bins=num_bins, width=0.05, color="blue")
    # axes[0].set_title("Training set")
    # axes[1].hist(y_valid, bins=num_bins, width=0.05, color="red")
    # axes[1].set_title("Validation set")
    # plt.show()

    # Preprocess the images
    # image = image_paths[100]
    # original_image = mpimg.imread(image)
    # preprocessed_image = img_preprocess(image)
    X_train = np.array(list(map(img_preprocess, X_train)))
    X_valid = np.array(list(map(img_preprocess, X_valid)))
    # fig, axs = plt.subplots(1, 2, figsize=(15, 10))
    # fig.tight_layout()
    # axs[0].imshow(original_image)
    # axs[0].set_title('Original Image')
    # axs[1].imshow(preprocessed_image)
    # axs[1].set_title('Preprocessed Image')
    # plt.show()

    model = nvidia_model()
    print(model.summary())
    history = model.fit(X_train, y_train, epochs=30, validation_data=(X_valid, y_valid),
                        batch_size=100, verbose=1, shuffle=True)

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.legend(['training', 'validation'])
    plt.title('Loss')
    plt.xlabel('Epoch')
    plt.show()

    model.save('./models/' + 'model.h5')