이거 정확도가 0.79 위로는 올라가지 않는데, 아마도 정상이라고 생각합니다.
사람이 색에 대해 느끼는 감각이 매우 주관적이니까요.
#!/usr/bin/env python
# coding: utf-8
# In[38]:
#0. 사용할 패키지 불러오기
# multi-class classification with Keras
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from tensorflow import keras
import tensorflow as tf
import seaborn as sns
import matplotlib as plt
# Explicitly load the pyplot submodule: `import matplotlib as plt` alone does
# NOT make `plt.pyplot` accessible; it only worked before because seaborn
# happened to import matplotlib.pyplot as a side effect.
import matplotlib.pyplot
# In[39]:
# Load the dataset: feature columns plus the class label in the last column.
dataframe = pandas.read_csv("color_dataset.csv", header=None)
dataframe.head(3)
# In[ ]:
# In[40]:
rows, cols = dataframe.shape
# 80/20 train/test split; random_state is pinned for reproducibility.
train_dataset = dataframe.sample(frac=0.8, random_state=0)
test_dataset = dataframe.drop(train_dataset.index)
# Normalization statistics must be computed from the TRAINING features only.
# The original used the full dataframe here, which leaks test-set information
# into the normalization step.
tempframe = train_dataset.iloc[:, :cols-1]
train_stats = tempframe.describe()
print(train_stats)
# Transpose so each feature is a row and 'mean'/'std' become columns,
# which is the layout norm() indexes into below.
train_stats = train_stats.transpose()
print(train_stats)
# Label column index as a string (only used for the debug print).
lidx = str(cols-1)
print(lidx)
# In[41]:
# Separate the label column (the last column) from the feature columns.
label_col = cols - 1
train_labels = train_dataset.iloc[:, label_col]
test_labels = test_dataset.iloc[:, label_col]
all_labels = dataframe.iloc[:, label_col]
for labels in (train_labels, test_labels, all_labels):
    print(labels.head(3))
# Drop the label column so the datasets contain features only.
train_dataset = train_dataset.drop(columns=[label_col])
print(train_dataset.head(3))
test_dataset = test_dataset.drop(columns=[label_col])
print(test_dataset.head(3))
# In[ ]:
# In[42]:
# Fit the encoder on ALL labels so train and test share one class mapping,
# even if a rare class happens to be absent from one split.
encoder = LabelEncoder()
encoder.fit(all_labels)
# Integer class ids for the training labels, then one-hot encode them.
train_ids = encoder.transform(train_labels)
encoded_Y = train_ids
dummy_y = np_utils.to_categorical(encoded_Y)  # one-hot encoded targets
print(encoded_Y[:3])
print(dummy_y[:3])
# Same two-step conversion for the test labels.
test_ids = encoder.transform(test_labels)
test_y = np_utils.to_categorical(test_ids)
# In[59]:
input_count = cols - 1               # number of feature columns
class_count = len(encoder.classes_)  # number of distinct class labels
print(input_count)
print(class_count)


def norm(x):
    """Z-score the feature frame *x* using the training statistics."""
    return (x - train_stats['mean']) / train_stats['std']


normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
# In[63]:
def model_1():
    """Baseline classifier: one hidden layer of 8 ReLU units, softmax output.

    Returns a compiled Keras Sequential model using categorical
    cross-entropy and the default Adam optimizer.
    """
    net = Sequential()
    net.add(Dense(8, input_dim=input_count, activation='relu'))
    net.add(Dense(class_count, activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    return net
def model_2():
    """Deeper classifier: two hidden layers of 32 ReLU units, softmax output.

    Returns a compiled Keras Sequential model. Adam is configured with a
    raised learning rate (0.01 vs the 0.001 default), presumably to speed
    convergence on this small dataset.
    """
    model = Sequential()
    model.add(Dense(32, input_dim=input_count, activation='relu'))
    # input_dim is only meaningful on the first layer; the original passed it
    # on the second layer too, where Keras ignores it — omitted here.
    model.add(Dense(32, activation='relu'))
    model.add(Dense(class_count, activation='softmax'))
    adam = tf.keras.optimizers.Adam(learning_rate=0.01)
    model.compile(loss='categorical_crossentropy', optimizer=adam,
                  metrics=['accuracy'])
    return model
# In[64]:
model = model_2()
# Training hyperparameters. NOTE(review): batch_size=20000 looks larger than
# the dataset, making each epoch effectively one full-batch update — confirm.
EPOCHS = 5000
BATCH_SIZE = 20000
# 20% of the training rows are held out for per-epoch validation.
history = model.fit(normed_train_data, dummy_y,
                    epochs=EPOCHS, batch_size=BATCH_SIZE,
                    validation_split=0.2)
# In[65]:
# Evaluate on the held-out test split; scores is [loss, accuracy].
scores = model.evaluate(normed_test_data, test_y)
print(scores)
# In[68]:
# Plot training AND validation loss per epoch. The original plotted only the
# training loss even though fit() was given validation_split=0.2, so
# history.history['val_loss'] is available and is the curve that actually
# reveals overfitting.
fig, loss_ax = plt.pyplot.subplots()
loss_ax.plot(history.history['loss'], 'y', label='train_loss')
loss_ax.plot(history.history['val_loss'], 'r', label='val_loss')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
loss_ax.legend(loc='upper left')
plt.pyplot.show()
'TensorFlow OpenCV' 카테고리의 다른 글

| 제목 | 날짜 |
|---|---|
| Keras Regression (1) | 2019.12.06 |
| PyCharm new project + interpreter (0) | 2019.11.11 |
| jupyter notebook + tensorflow (0) | 2019.11.10 |
| CUDA10.0, cuDNN, Anaconda, Pycharm (0) | 2019.10.30 |
| python file example 개행문자 \r 없애기 (0) | 2019.10.05 |