일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
 | | | | 1 | 2 | 3 |
4 | 5 | 6 | 7 | 8 | 9 | 10 |
11 | 12 | 13 | 14 | 15 | 16 | 17 |
18 | 19 | 20 | 21 | 22 | 23 | 24 |
25 | 26 | 27 | 28 | 29 | 30 | 31 |
Tags
- 빅분기실기
- 데이터분석
- 케라스
- 의학논문
- 파이썬
- 컴퓨터비전
- 데이터분석가
- resnet
- 딥러닝
- TensorFlow
- Python
- machinelearning
- 데이터EDA
- 데이터모델링
- 의학통계
- 빅분기
- 데이터사이언스
- ComputerVision
- 통계
- 코딩테스트
- 빅데이터분석기사
- Deeplearning
- 데이터전처리
- 머신러닝
- AI
- CNN
- Keras
- 인공지능
- mnist
- 텐서플로우
- Today
- Total
Be Brave, Be Humble
LinearRegression_Example 본문
4개의 입력데이터 연산 (A1-A2+A3-A4) 예측하는 Linear Regression Batch 예제
data definition
In [ ]:
import numpy as np
from datetime import datetime
# Load the training set; each row is [target, A1, A2, A3, A4].
# NOTE(review): assumes './sps.csv' exists next to this notebook — confirm.
loaded_data = np.loadtxt('./sps.csv', delimiter=',', dtype=np.float32)
x_data = loaded_data[ :, 1:]   # input features: the last 4 columns
t_data = loaded_data[ :, [0]]  # target column, kept 2-D as shape (N, 1)
# Verify dimensionality and shapes of the loaded arrays
print("loaded_data.ndim = ", loaded_data.ndim, ", loaded_data.shape = ", loaded_data.shape)
print("x_data.ndim = ", x_data.ndim, ", x_data.shape = ", x_data.shape)
print("t_data.ndim = ", t_data.ndim, ", t_data.shape = ", t_data.shape)
loaded_data.ndim = 2 , loaded_data.shape = (50, 5)
x_data.ndim = 2 , x_data.shape = (50, 4)
t_data.ndim = 2 , t_data.shape = (50, 1)
initialize weights and bias
In [ ]:
np.random.seed(0)       # fix the RNG so the initial parameters are reproducible
W = np.random.rand(4,1) # 4x1 weight matrix: one weight per input feature
b = np.random.rand(1)   # scalar bias
print("W = ", W, ", W.shape = ", W.shape, ", b = ", b, ", b.shape = ", b.shape)
W = [[0.5488135 ]
[0.71518937]
[0.60276338]
[0.54488318]] , W.shape = (4, 1) , b = [0.4236548] , b.shape = (1,)
define loss function and output, y
In [ ]:
def loss_func(x, t):
    """Mean squared error of the current global model (W, b) on inputs x vs targets t."""
    prediction = np.dot(x, W) + b
    residual = t - prediction
    return np.sum(residual ** 2) / len(x)
In [ ]:
def numerical_derivative(f, x):
    """Central-difference gradient of scalar function f, evaluated element-wise at x.

    x is perturbed in place while f is evaluated, then restored, so f may
    read x through a global reference; the array is unchanged on return.
    """
    h = 1e-4  # finite-difference step size
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        original = x[idx]
        x[idx] = float(original) + h
        forward = f(x)            # f(x + h) in this coordinate
        x[idx] = float(original) - h
        backward = f(x)           # f(x - h) in this coordinate
        grad[idx] = (forward - backward) / (2 * h)
        x[idx] = original         # restore before moving on
        it.iternext()
    return grad
In [ ]:
# Report the current training loss of the global linear model.
# Inputs x, t: numpy arrays.
def error_val(x, t):
    """Return the mean squared error between targets t and x.dot(W) + b."""
    output = x.dot(W) + b
    return np.sum(np.square(t - output)) / len(x)
learning
In [ ]:
learning_rate = 1e-3  # gradient-descent step size
# NOTE: the lambda ignores its argument on purpose — loss_func reads the
# globals W and b, which numerical_derivative perturbs in place, so the
# same f yields d(loss)/dW or d(loss)/db depending on the array passed in.
f = lambda x : loss_func(x_data,t_data)
print("Initial error value = ", error_val(x_data, t_data), "Initial W = ", W, "\n", ", b = ", b )
start_time = datetime.now()
for step in range(30001):  # 30,001 gradient-descent iterations
    W -= learning_rate * numerical_derivative(f, W)
    b -= learning_rate * numerical_derivative(f, b)
    if (step % 500 == 0):  # log the training loss every 500 steps
        print("step = ", step, "error value = ", error_val(x_data, t_data) )
end_time = datetime.now()
print("")
print("Elapsed Time => ", end_time - start_time)
Initial error value = 64.38302549674624 Initial W = [[0.5488135 ]
[0.71518937]
[0.60276338]
[0.54488318]]
, b = [0.4236548]
step = 0 error value = 60.604776072341444
step = 500 error value = 0.01167190429026227
step = 1000 error value = 0.004047845907167936
step = 1500 error value = 0.0014043526649038332
step = 2000 error value = 0.00048722369128301977
step = 2500 error value = 0.00016903654707396402
step = 3000 error value = 5.864524808192556e-05
step = 3500 error value = 2.0346281216250795e-05
step = 4000 error value = 7.0589037112171215e-06
step = 4500 error value = 2.4490038781355694e-06
step = 5000 error value = 8.496531813563435e-07
step = 5500 error value = 2.9477720922949027e-07
step = 6000 error value = 1.0226949652836696e-07
step = 6500 error value = 3.5481202727676135e-08
step = 7000 error value = 1.230978727513882e-08
step = 7500 error value = 4.2707363649495105e-09
step = 8000 error value = 1.4816819081469759e-09
step = 8500 error value = 5.14052165565946e-10
step = 9000 error value = 1.7834437166114527e-10
step = 9500 error value = 6.187448869478846e-11
step = 10000 error value = 2.1466628382414438e-11
step = 10500 error value = 7.447595024911832e-12
step = 11000 error value = 2.583855772566766e-12
step = 11500 error value = 8.964384646654429e-13
step = 12000 error value = 3.1100881478795545e-13
step = 12500 error value = 1.0790086188891214e-13
step = 13000 error value = 3.7434938904037783e-14
step = 13500 error value = 1.2987613159065e-14
step = 14000 error value = 4.5059000875171345e-15
step = 14500 error value = 1.5632692166840009e-15
step = 15000 error value = 5.423579204531311e-16
step = 15500 error value = 1.8816472159910655e-16
step = 16000 error value = 6.528154409141282e-17
step = 16500 error value = 2.2648666703984575e-17
step = 17000 error value = 7.857690960008845e-18
step = 17500 error value = 2.726133322193127e-18
step = 18000 error value = 9.458002967175832e-19
step = 18500 error value = 3.2813425922981097e-19
step = 19000 error value = 1.1384228300504383e-19
step = 19500 error value = 3.9496353646178484e-20
step = 20000 error value = 1.370281435831186e-20
step = 20500 error value = 4.754125218582716e-21
step = 21000 error value = 1.6494531600977032e-21
step = 21500 error value = 5.723172523227641e-22
step = 22000 error value = 1.9859337893496504e-22
step = 22500 error value = 6.893889116469676e-23
step = 23000 error value = 2.3940569803507154e-23
step = 23500 error value = 8.318580669294951e-24
step = 24000 error value = 2.8926538754400545e-24
step = 24500 error value = 1.007017095562386e-24
step = 25000 error value = 3.5189776953551637e-25
step = 25500 error value = 1.2368469252287204e-25
step = 26000 error value = 4.4692299850792143e-26
step = 26500 error value = 1.692665607750921e-26
step = 27000 error value = 6.981567293526183e-27
step = 27500 error value = 3.257963561703718e-27
step = 28000 error value = 1.7847429561181468e-27
step = 28500 error value = 1.1894827601256653e-27
step = 29000 error value = 9.176150809230155e-28
step = 29500 error value = 7.812023703927484e-28
step = 30000 error value = 7.296706880293302e-28
Elapsed Time => 0:00:06.304803
In [ ]:
# Inspect the learned parameters; for the target A1 - A2 + A3 - A4 they
# converge to W ≈ [[1], [-1], [1], [-1]] and b ≈ 0 (see output below).
print(W)
print(b)
[[ 1.]
[-1.]
[ 1.]
[-1.]]
[2.38435128e-14]
evaluate and predict
In [ ]:
# After training, predict the target value for previously unseen data.
# Input x: numpy array of 4 features (or a batch of such rows).
def predict(x):
    """Apply the learned linear model: np.dot(x, W) + b."""
    return np.dot(x, W) + b
In [ ]:
# Sanity check: the model should reproduce A1 - A2 + A3 - A4.
ex_data_01 = np.array([4, 4, 4, 4]) # 4 - 4 + 4 - 4 = 0
print("predicted value = ", predict(ex_data_01) )
predicted value = [-3.21117277e-14]
In [ ]:
# Mixed signs, including a negative A4.
ex_data_02 = np.array([-3, 0, 9, -1]) # -3 -0 +9 -(-1) = 7
print("predicted value = ", predict(ex_data_02) )
predicted value = [7.]
In [ ]:
# All-negative and mixed inputs with a negative expected value.
ex_data_03 = np.array([-7, -9, -2, 8]) # -7 -(-9) + (-2) -8 = -8
print("predicted value = ", predict(ex_data_03) )
predicted value = [-8.]
In [ ]:
# Small-magnitude mixed-sign input.
ex_data_04 = np.array([1, -2, 3, -2]) # 1 -(-2) + 3 -(-2) = 8
print("predicted value = ", predict(ex_data_04) )
predicted value = [8.]
In [ ]:
# Larger magnitudes than anything in the training data.
ex_data_05 = np.array([19, -12, 0, -76]) # 19 -(-12) + 0 -(-76) = 107
print("predicted value = ", predict(ex_data_05) )
predicted value = [107.]
In [ ]:
# Extrapolation test with values in the thousands.
ex_data_06 = np.array([2001, -1, 109, 31]) # 2001 -(-1) + 109 -(31) = 2080
print("predicted value = ", predict(ex_data_06) )
predicted value = [2080.]
In [ ]:
# Extreme extrapolation: values up to ~1.2 million (tiny float error appears in the output).
ex_data_07 = np.array([99999, -8911, 10009, 1231331]) # 99999 -(-8911) + 10009 -(1231331) = -1112412
print("predicted value = ", predict(ex_data_07) )
predicted value = [-1112412.00000001]
In [ ]:
'AI > Computer Vision' 카테고리의 다른 글
Keras_Simple_Linear_Regression_Exercise_Sequential (0) | 2022.08.01 |
---|---|
multi_variable_linear_regression (0) | 2022.08.01 |
Logistic_XOR_Regression (0) | 2022.07.25 |
LogisticRegression_Example (0) | 2022.07.25 |
NumericalDerivative_Example (0) | 2022.07.25 |
Comments