import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
tf.random.set_seed(777) # for reproducibility
print(tf.__version__)
# XOR : 00 -> 0, 01 -> 1, 10 -> 1, 11 -> 0
x_data = [[0, 0],[0, 1],[1, 0],[1, 1]]
y_data = [[0] , [1], [1], [0]]
plt.scatter(x_data[0][0],x_data[0][1], c='red' , marker='^')
plt.scatter(x_data[3][0],x_data[3][1], c='orange' , marker='^')
plt.scatter(x_data[1][0],x_data[1][1], c='blue' , marker='^')
plt.scatter(x_data[2][0],x_data[2][1], c='green' , marker='^')
plt.xlabel("x1")
plt.ylabel("x2")
plt.show()
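# Note: the classes {(0,0), (1,1)} and {(0,1), (1,0)} cannot be separated by a
# single straight line, which is exactly what makes XOR a useful test case here.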
# Load the training data through the TensorFlow data API (the batch size is the number of samples trained at once)
# The preprocess function casts features and labels to the dtypes the training computation expects
dataset = tf.data.Dataset.from_tensor_slices((x_data, y_data)).batch(len(x_data))
def preprocess_data(features, labels):
    features = tf.cast(features, tf.float32)
    labels = tf.cast(labels, tf.float32)
    return features, labels
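# Optional sanity check (a small sketch, not part of the original lab): pull one
# batch and confirm preprocess_data yields float32 tensors of the expected shapes.
for f, l in dataset.take(1):
    f, l = preprocess_data(f, l)
    print(f.shape, f.dtype, l.shape, l.dtype)  # (4, 2) float32, (4, 1) float32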
# Initialize the weight and bias
W = tf.Variable(tf.zeros((2,1)), name='weight')
b = tf.Variable(tf.zeros((1,)), name='bias')
print("W = {}, B = {}".format(W.numpy(), b.numpy()))
# Declare the sigmoid function as the hypothesis
# Returns a value between 0 and 1 : tf.sigmoid(tf.matmul(X, W) + b)
def logistic_regression(features):
    # sigmoid(z) = 1 / (1 + exp(-z)); note the negative sign in the exponent
    hypothesis = tf.divide(1., 1. + tf.exp(-(tf.matmul(features, W) + b)))
    return hypothesis
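# Optional sanity check: the manual formula above should match tf.sigmoid.
# With the zero-initialized W and b, every output is exactly 0.5.
_check = tf.constant([[0., 0.], [1., 1.]])
print(logistic_regression(_check).numpy())           # [[0.5], [0.5]]
print(tf.sigmoid(tf.matmul(_check, W) + b).numpy())  # same values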
# Define the cost function (binary cross-entropy)
def loss_fn(hypothesis, features, labels):
    cost = -tf.reduce_mean(labels * tf.math.log(hypothesis) + (1 - labels) * tf.math.log(1 - hypothesis))
    return cost
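# For reference, loss_fn computes the standard binary cross-entropy; the
# built-in Keras loss gives the same value up to its internal epsilon clipping:
bce = tf.keras.losses.BinaryCrossentropy()
# bce(labels, hypothesis) ~ loss_fn(hypothesis, features, labels)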
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
# If the sigmoid output is greater than 0.5 predict 1, otherwise predict 0
def accuracy_fn(hypothesis, labels):
    predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
    return accuracy
# Compute the gradients with tf.GradientTape
def grad(hypothesis, features, labels):
    with tf.GradientTape() as tape:
        # the forward pass must run inside the tape so it is recorded
        loss_value = loss_fn(logistic_regression(features), features, labels)
    return tape.gradient(loss_value, [W, b])
# run 1001 training iterations so the final log at step 1000 is printed
EPOCHS = 1001
for step in range(EPOCHS):
    # iterate over the (x, y) batches
    for features, labels in dataset:
        # cast features and labels to float32
        features, labels = preprocess_data(features, labels)
        # compute gradients with the gradient tape and apply them
        grads = grad(logistic_regression(features), features, labels)
        optimizer.apply_gradients(grads_and_vars=zip(grads, [W, b]))
        # log every 100 steps
        if step % 100 == 0:
            print("Iter: {}, Loss: {:.4f}".format(step, loss_fn(logistic_regression(features), features, labels)))
print("W = {}, B = {}".format(W.numpy(), b.numpy()))
x_data, y_data = preprocess_data(x_data, y_data)
test_acc = accuracy_fn(logistic_regression(x_data),y_data)
print("Testset Accuracy: {:.4f}".format(test_acc))
plt.scatter(x_data[0][0],x_data[0][1], c='red' , marker='^')
plt.scatter(x_data[3][0],x_data[3][1], c='orange' , marker='^')
plt.scatter(x_data[1][0],x_data[1][1], c='blue' , marker='^')
plt.scatter(x_data[2][0],x_data[2][1], c='green' , marker='^')
plt.plot(x_data,logistic_regression(x_data), c='gray')
plt.tick_params(axis='y', colors="white")
plt.tick_params(axis='x', colors='white')
plt.xlabel("x1",color="white")
plt.ylabel("x2",color="white")
plt.show()
# Define W1, b1, W2, b2, W3, b3
W1 = tf.Variable(tf.random.normal((2, 1)), name='weight1')
b1 = tf.Variable(tf.random.normal((1,)), name='bias1')
W2 = tf.Variable(tf.random.normal((2, 1)), name='weight2')
b2 = tf.Variable(tf.random.normal((1,)), name='bias2')
W3 = tf.Variable(tf.random.normal((2, 1)), name='weight3')
b3 = tf.Variable(tf.random.normal((1,)), name='bias3')
# layer1 : sigmoid ─┬─> layer3
# layer2 : sigmoid --┘
# hypothesis: combine layer1 and layer2, then apply a final sigmoid
def neural_net(features):
    layer1 = tf.sigmoid(tf.matmul(features, W1) + b1)
    layer2 = tf.sigmoid(tf.matmul(features, W2) + b2)
    layer3 = tf.concat([layer1, layer2], -1)
    layer3 = tf.reshape(layer3, shape=[-1, 2])
    hypothesis = tf.sigmoid(tf.matmul(layer3, W3) + b3)
    return hypothesis
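# Note: concatenating two parallel 1-unit sigmoid layers is equivalent to a
# single hidden layer with a (2, 2) weight matrix. A minimal sketch of that
# formulation (neural_net_merged, W_h, b_h are hypothetical names, built from
# the same W1, W2, b1, b2, W3, b3 above):
def neural_net_merged(features):
    W_h = tf.concat([W1, W2], axis=-1)  # shape (2, 2)
    b_h = tf.concat([b1, b2], axis=-1)  # shape (2,)
    hidden = tf.sigmoid(tf.matmul(features, W_h) + b_h)
    return tf.sigmoid(tf.matmul(hidden, W3) + b3)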
# Loss function (binary cross-entropy)
def loss_fn(hypothesis, labels):
    cost = -tf.reduce_mean(labels * tf.math.log(hypothesis) + (1 - labels) * tf.math.log(1 - hypothesis))
    return cost
# Optimizer SGD
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
# Measure accuracy
def accuracy_fn(hypothesis, labels):
    predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
    return accuracy
# Gradient Tape
def grad(hypothesis, features, labels):
    with tf.GradientTape() as tape:
        loss_value = loss_fn(neural_net(features), labels)
    return tape.gradient(loss_value, [W1, W2, W3, b1, b2, b3])
# epoch 50000
EPOCHS = 50000
for step in range(EPOCHS):
    for features, labels in dataset:
        # preprocess the batch
        features, labels = preprocess_data(features, labels)
        # neural_net : layer1 + layer2 -> layer3
        grads = grad(neural_net(features), features, labels)
        # apply the gradients
        optimizer.apply_gradients(grads_and_vars=zip(grads, [W1, W2, W3, b1, b2, b3]))
        # log every 5000 steps
        if step % 5000 == 0:
            print("Iter: {}, Loss: {:.4f}".format(step, loss_fn(neural_net(features), labels)))
x_data, y_data = preprocess_data(x_data, y_data)
# test data
test_acc = accuracy_fn(neural_net(x_data),y_data)
print("Testset Accuracy: {:.4f}".format(test_acc))
plt.scatter(x_data[0][0],x_data[0][1], c='red' , marker='^')
plt.scatter(x_data[3][0],x_data[3][1], c='orange' , marker='^')
plt.scatter(x_data[1][0],x_data[1][1], c='blue' , marker='^')
plt.scatter(x_data[2][0],x_data[2][1], c='green' , marker='^')
plt.plot(x_data,neural_net(x_data), c='gray')
plt.tick_params(axis='y', colors="white")
plt.tick_params(axis='x', colors='white')
plt.xlabel("x1",color="white")
plt.ylabel("x2",color="white")
plt.show()
# Solving the XOR problem with a deep neural network
# dataset
dataset = tf.data.Dataset.from_tensor_slices((x_data, y_data)).batch(len(x_data))
nb_classes = 10
class wide_deep_nn():
    # initialization
    def __init__(self, nb_classes):
        super(wide_deep_nn, self).__init__()
        # define W1, b1, W2, b2, W3, b3, W4, b4
        self.W1 = tf.Variable(tf.random.normal((2, nb_classes)), name='weight1')
        self.b1 = tf.Variable(tf.random.normal((nb_classes,)), name='bias1')
        self.W2 = tf.Variable(tf.random.normal((nb_classes, nb_classes)), name='weight2')
        self.b2 = tf.Variable(tf.random.normal((nb_classes,)), name='bias2')
        self.W3 = tf.Variable(tf.random.normal((nb_classes, nb_classes)), name='weight3')
        self.b3 = tf.Variable(tf.random.normal((nb_classes,)), name='bias3')
        self.W4 = tf.Variable(tf.random.normal((nb_classes, 1)), name='weight4')
        self.b4 = tf.Variable(tf.random.normal((1,)), name='bias4')
        self.variables = [self.W1, self.b1, self.W2, self.b2, self.W3, self.b3, self.W4, self.b4]
    # data preprocessing
    def preprocess_data(self, features, labels):
        features = tf.cast(features, tf.float32)
        labels = tf.cast(labels, tf.float32)
        return features, labels
    # build the model: a 4-layer neural network
    # layer1 -> layer2 -> layer3 -> hypothesis
    def deep_nn(self, features):
        layer1 = tf.sigmoid(tf.matmul(features, self.W1) + self.b1)
        layer2 = tf.sigmoid(tf.matmul(layer1, self.W2) + self.b2)
        layer3 = tf.sigmoid(tf.matmul(layer2, self.W3) + self.b3)
        hypothesis = tf.sigmoid(tf.matmul(layer3, self.W4) + self.b4)
        return hypothesis
    # loss function
    def loss_fn(self, hypothesis, features, labels):
        cost = -tf.reduce_mean(labels * tf.math.log(hypothesis) + (1 - labels) * tf.math.log(1 - hypothesis))
        return cost
    # accuracy function
    def accuracy_fn(self, hypothesis, labels):
        predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
        return accuracy
    # Gradient Tape
    def grad(self, hypothesis, features, labels):
        with tf.GradientTape() as tape:
            loss_value = self.loss_fn(self.deep_nn(features), features, labels)
        return tape.gradient(loss_value, self.variables)
    # defaults: 20000 epochs, log every 500 steps
    def fit(self, dataset, EPOCHS=20000, verbose=500):
        optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
        for step in range(EPOCHS):
            for features, labels in dataset:
                features, labels = self.preprocess_data(features, labels)
                grads = self.grad(self.deep_nn(features), features, labels)
                optimizer.apply_gradients(grads_and_vars=zip(grads, self.variables))
                if step % verbose == 0:
                    print("Iter: {}, Loss: {:.4f}".format(step, self.loss_fn(self.deep_nn(features), features, labels)))
    # Model Test
    def test_model(self, x_data, y_data):
        # Test Data preprocessing
        x_data, y_data = self.preprocess_data(x_data, y_data)
        # Accuracy check
        test_acc = self.accuracy_fn(self.deep_nn(x_data), y_data)
        # Accuracy printing
        print("Testset Accuracy: {:.4f}".format(test_acc))
# define the model
model = wide_deep_nn(nb_classes)
# fitting
model.fit(dataset)
# test data accuracy
model.test_model(x_data, y_data)
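# For comparison only: a minimal sketch of the same 4-layer architecture using
# the Keras API (keras_model is a hypothetical name, not part of the original lab).
keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(nb_classes, activation='sigmoid', input_shape=(2,)),
    tf.keras.layers.Dense(nb_classes, activation='sigmoid'),
    tf.keras.layers.Dense(nb_classes, activation='sigmoid'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
keras_model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
                    loss='binary_crossentropy', metrics=['accuracy'])
# keras_model.fit(dataset, epochs=20000, verbose=0)  # would train it the same way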
# define the dataset
dataset = tf.data.Dataset.from_tensor_slices((x_data, y_data)).batch(len(x_data))
# define data preprocessing
def preprocess_data(features, labels):
    features = tf.cast(features, tf.float32)
    labels = tf.cast(labels, tf.float32)
    return features, labels
# Summary values are written to the logs folder; view them with the command below
# tensorboard --logdir=./logs/xor
log_path = "./logs/xor"
writer = tf.summary.create_file_writer(log_path)
# Train the data above with a 4-layer neural network and build the model
# Each weight, bias, and layer output is written to TensorBoard as a histogram (model)
# The loss is written to TensorBoard as a scalar
# Define and declare W1, b1, W2, b2, W3, b3, W4, b4
W1 = tf.Variable(tf.random.normal((2,10)), name='weight1')
b1 = tf.Variable(tf.random.normal((10,)), name='bias1')
W2 = tf.Variable(tf.random.normal((10,10)),name='weight2')
b2 = tf.Variable(tf.random.normal((10,)),name='bias2')
W3 = tf.Variable(tf.random.normal((10,10)),name='weight3')
b3 = tf.Variable(tf.random.normal((10,)),name='bias3')
W4 = tf.Variable(tf.random.normal((10,1)), name='weight4')
b4 = tf.Variable(tf.random.normal((1,)), name='bias4')
# layer1 -> layer2 -> layer3 -> hypothesis
def neural_net(features, step):
    layer1 = tf.sigmoid(tf.matmul(features, W1) + b1)
    layer2 = tf.sigmoid(tf.matmul(layer1, W2) + b2)
    layer3 = tf.sigmoid(tf.matmul(layer2, W3) + b3)
    hypothesis = tf.sigmoid(tf.matmul(layer3, W4) + b4)
    # write histograms to TensorBoard
    with writer.as_default():
        tf.summary.histogram("weights1", W1, step=step)
        tf.summary.histogram("biases1", b1, step=step)
        tf.summary.histogram("layer1", layer1, step=step)
        tf.summary.histogram("weights2", W2, step=step)
        tf.summary.histogram("biases2", b2, step=step)
        tf.summary.histogram("layer2", layer2, step=step)
        tf.summary.histogram("weights3", W3, step=step)
        tf.summary.histogram("biases3", b3, step=step)
        tf.summary.histogram("layer3", layer3, step=step)
        tf.summary.histogram("weights4", W4, step=step)
        tf.summary.histogram("biases4", b4, step=step)
        tf.summary.histogram("hypothesis", hypothesis, step=step)
    return hypothesis
# loss function (also logs the loss to TensorBoard as a scalar; step is passed
# in explicitly rather than relying on the global loop variable)
def loss_fn(hypothesis, labels, step):
    cost = -tf.reduce_mean(labels * tf.math.log(hypothesis) + (1 - labels) * tf.math.log(1 - hypothesis))
    with writer.as_default():
        tf.summary.scalar('loss', cost, step=step)
    return cost
# Define the SGD optimizer (learning rate 0.1)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
# Accuracy Function
def accuracy_fn(hypothesis, labels):
    predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, labels), dtype=tf.float32))
    return accuracy
# GradientTape
def grad(hypothesis, features, labels, step):
    with tf.GradientTape() as tape:
        loss_value = loss_fn(neural_net(features, step), labels, step)
    return tape.gradient(loss_value, [W1, W2, W3, W4, b1, b2, b3, b4])
# 3000 epochs
EPOCHS = 3000
for step in range(EPOCHS):
    # iterate over the (x, y) batches
    for features, labels in dataset:
        # data preprocessing
        features, labels = preprocess_data(features, labels)
        # compute gradients
        grads = grad(neural_net(features, step), features, labels, step)
        # apply them with the optimizer
        optimizer.apply_gradients(grads_and_vars=zip(grads, [W1, W2, W3, W4, b1, b2, b3, b4]))
        # log every 50 steps
        if step % 50 == 0:
            loss_value = loss_fn(neural_net(features, step), labels, step)
            print("Iter: {}, Loss: {:.4f}".format(step, loss_value))
# preprocessing
x_data, y_data = preprocess_data(x_data,y_data)
# accuracy
test_acc = accuracy_fn(neural_net(x_data,step),y_data)
print("Testset Accuracy: {:.4f}".format(test_acc))
# Jupyter Notebook에서 Tensorboard 실행
# Load the TensorBoard notebook extension
%load_ext tensorboard
'''Start TensorBoard through the command line or within a notebook experience.
The two interfaces are generally the same. In notebooks, use the %tensorboard line magic.
On the command line, run the same command without the '%'.'''
%tensorboard --logdir logs/xor