Implementing gradient descent from scratch (linear regression)
jinmc
2023. 5. 17. 02:03
import numpy as np

# synthetic data: y = 2x plus small per-sample Gaussian noise
x = np.random.rand(1000)
y = x * 2 + np.random.randn(1000) * 0.1
print(x, y)
# parameters to learn
w = 0.0
b = 0.0

# hyperparameters
lr = 0.01
epochs = 100000
def linear_regression(w1: float, b1: float, x1: float) -> float:
    # model prediction for a single sample: yhat = w * x + b
    return w1 * x1 + b1
def loss_function(x2: np.ndarray, y2: np.ndarray, w2: float, b2: float) -> float:
    # mean squared error over the whole dataset
    yhat = w2 * x2 + b2
    return np.sum((y2 - yhat) ** 2) / x2.shape[0]
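
One quick way to sanity-check the loss before training: with w = b = 0 every prediction is 0, so the MSE reduces to the mean of y squared, which for y ≈ 2x with x uniform on [0, 1] is roughly 4/3 ≈ 1.33 plus the tiny noise variance.

# with zero parameters every prediction is 0, so MSE = mean(y ** 2)
# for y ≈ 2x with x ~ Uniform(0, 1), E[y ** 2] ≈ 4 * E[x ** 2] = 4/3 ≈ 1.33
print(loss_function(x, y, 0.0, 0.0))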
def gradient_descent(x3: np.ndarray, y3: np.ndarray, w3: float, b3: float, lr: float) -> tuple:
    # accumulate the gradient of the MSE loss sample by sample
    dw = 0.0
    db = 0.0
    N = x3.shape[0]
    for xi, yi in zip(x3, y3):
        dw += -2 * xi * (yi - linear_regression(w3, b3, xi))
        db += -2 * (yi - linear_regression(w3, b3, xi))
    # one step against the averaged gradient
    w3 -= dw * lr * (1 / N)
    b3 -= db * lr * (1 / N)
    return w3, b3
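
The dw and db accumulated in the loop are the partial derivatives of the mean squared error; writing the loss out makes the update rule explicit:

L(w, b) = \frac{1}{N} \sum_{i=1}^{N} \bigl( y_i - (w x_i + b) \bigr)^2

\frac{\partial L}{\partial w} = -\frac{2}{N} \sum_{i=1}^{N} x_i \bigl( y_i - (w x_i + b) \bigr), \qquad \frac{\partial L}{\partial b} = -\frac{2}{N} \sum_{i=1}^{N} \bigl( y_i - (w x_i + b) \bigr)

Each call to gradient_descent therefore takes one step w ← w - lr · ∂L/∂w and b ← b - lr · ∂L/∂b, which is exactly what the two in-place updates at the bottom of the function do.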
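
The per-sample Python loop is easy to follow but slow for 1000 points times 100000 epochs. A vectorized sketch of the same update using NumPy broadcasting (the helper name gradient_descent_vectorized is my own, not from the code above) could look like this:

def gradient_descent_vectorized(x3: np.ndarray, y3: np.ndarray, w3: float, b3: float, lr: float) -> tuple:
    # same update as gradient_descent, but computed with array operations
    N = x3.shape[0]
    error = y3 - (w3 * x3 + b3)         # residual for every sample at once
    dw = -2.0 * np.sum(x3 * error) / N  # dL/dw
    db = -2.0 * np.sum(error) / N       # dL/db
    return w3 - lr * dw, b3 - lr * db

Dropping the Python-level loop this way typically speeds the training loop up by a couple of orders of magnitude.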
for i in range(epochs):
    w, b = gradient_descent(x, y, w, b, lr)
    if i % 1000 == 0:  # printing all 100000 epochs would flood the console
        loss = loss_function(x, y, w, b)
        print(f"{i} weight {w:.4f} bias {b:.4f} loss {loss:.4f}")