[PyTorch]Linear, Ridge, Lasso, ElasticNet implementation-코드 미완성
1. Linear Regression Numpy Implementation
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
import numpy as np
X, y= make_regression(n_samples=100, n_features=1, noise=0.4, bias=50)
def plotLine(theta0, theta1, X, y):
max_x = np.max(X) + 100
min_x = np.min(X) - 100
xplot = np.linspace(min_x, max_x, 1000)
yplot = theta0 + theta1 * xplot
plt.plot(xplot, yplot, color='#58b970', label='Regression Line')
plt.scatter(X,y)
plt.axis([-10, 10, 0, 200])
plt.show()
def hypothesis(theta0, theta1, x):
return theta0 + (theta1*x)
def cost(theta0, theta1, X, y):
costValue = 0
for (xi, yi) in zip(X, y):
costValue += 0.5 * ((hypothesis(theta0, theta1, xi) - yi)**2)
return costValue
def derivatives(theta0, theta1, X, y):
dtheta0 = 0
dtheta1 = 0
for (xi, yi) in zip(X, y):
dtheta0 += hypothesis(theta0, theta1, xi) - yi
dtheta1 += (hypothesis(theta0, theta1, xi) - yi)*xi
dtheta0 /= len(X)
dtheta1 /= len(X)
return dtheta0, dtheta1
def updateParameters(theta0, theta1, X, y, alpha):
dtheta0, dtheta1 = derivatives(theta0, theta1, X, y)
theta0 = theta0 - (alpha * dtheta0)
theta1 = theta1 - (alpha * dtheta1)
return theta0, theta1
def LinearRegression(X, y):
theta0 = np.random.rand()
theta1 = np.random.rand()
for i in range(0, 1000):
if i % 100 == 0:
plotLine(theta0, theta1, X, y)
print(cost(theta0, theta1, X, y))
theta0, theta1 = updateParameters(theta0, theta1, X, y, 0.005)
LinearRegression(X, y)
[302493.21138417]
[110433.00362617]
[40402.32548701]
[14812.95164304]
[5443.97939662]
[2007.39956265]
[744.68382217]
[279.98276534]
[108.71444941]
[45.5075413]
2. Ridge Regression Numpy Implementation
def hypothesis(theta0, theta1, x):
return theta0 + (theta1*x)
def Ridge_cost(theta0, theta1, X, y):
costValue = 0
for (xi, yi) in zip(X, y):
costValue += 0.5 * ((hypothesis(theta0, theta1, xi) - yi)**2)
costValue +=
return costValue
def Ridge_derivatives(theta0, theta1, X, y):
dtheta0 = 0
dtheta1 = 0
for (xi, yi) in zip(X, y):
dtheta0 += hypothesis(theta0, theta1, xi) - yi
dtheta1 += (hypothesis(theta0, theta1, xi) - yi)*xi
dtheta0 /= len(X)
dtheta1 /= len(X)
return dtheta0, dtheta1
def updateParameters(theta0, theta1, X, y, alpha):
dtheta0, dtheta1 = Ridge_derivatives(theta0, theta1, X, y)
theta0 = theta0 - (alpha * dtheta0)
theta1 = theta1 - (alpha * dtheta1)
return theta0, theta1
def RidgeRegression(X, y):
theta0 = np.random.rand()
theta1 = np.random.rand()
for i in range(0, 1000):
if i % 100 == 0:
plotLine(theta0, theta1, X, y)
print(Ridge_cost(theta0, theta1, X, y))
theta0, theta1 = updateParameters(theta0, theta1, X, y, 0.005)
RidgeRegression(X, y)
[305493.70822568]
[111556.2529174]
[40822.75853898]
[14970.2999332]
[5502.86073888]
[2029.43132433]
[752.92674748]
[283.06650969]
[109.86801957]
[45.93904217]
3. Lasso Regression Numpy Implementation
def hypothesis(theta0, theta1, x):
return theta0 + (theta1*x)
def Lasso_cost(theta0, theta1, X, y):
costValue = 0
for (xi, yi) in zip(X, y):
costValue += 0.5 * ((hypothesis(theta0, theta1, xi) - yi)**2)
return costValue
def Lasso_derivatives(theta0, theta1, X, y):
dtheta0 = 0
dtheta1 = 0
for (xi, yi) in zip(X, y):
dtheta0 += hypothesis(theta0, theta1, xi) - yi
dtheta1 += (hypothesis(theta0, theta1, xi) - yi)*xi
dtheta0 /= len(X)
dtheta1 /= len(X)
return dtheta0, dtheta1
def updateParameters(theta0, theta1, X, y, alpha):
dtheta0, dtheta1 = Lasso_derivatives(theta0, theta1, X, y)
theta0 = theta0 - (alpha * dtheta0)
theta1 = theta1 - (alpha * dtheta1)
return theta0, theta1
def LassoRegression(X, y):
theta0 = np.random.rand()
theta1 = np.random.rand()
for i in range(0, 1000):
if i % 100 == 0:
plotLine(theta0, theta1, X, y)
print(Lasso_cost(theta0, theta1, X, y))
theta0, theta1 = updateParameters(theta0, theta1, X, y, 0.005)
LassoRegression(X, y)
[307794.36779767]
[112448.67312985]
[41167.18687274]
[15102.67157781]
[5553.55296535]
[2048.78522037]
[760.29670989]
[285.86670904]
[110.92988827]
[46.34103812]
4. ElasticNet Numpy Implementation (HW1)
def hypothesis(theta0, theta1, x):
return theta0 + (theta1*x)
def EN_cost(theta0, theta1, X, y):
costValue = 0
for (xi, yi) in zip(X, y):
costValue += 0.5 * ((hypothesis(theta0, theta1, xi) - yi)**2)
return costValue
def EN_derivatives(theta0, theta1, X, y):
dtheta0 = 0
dtheta1 = 0
for (xi, yi) in zip(X, y):
dtheta0 += hypothesis(theta0, theta1, xi) - yi
dtheta1 += (hypothesis(theta0, theta1, xi) - yi)*xi
dtheta0 /= len(X)
dtheta1 /= len(X)
return dtheta0, dtheta1
def updateParameters(theta0, theta1, X, y, alpha):
dtheta0, dtheta1 = EN_derivatives(theta0, theta1, X, y)
theta0 = theta0 - (alpha * dtheta0)
theta1 = theta1 - (alpha * dtheta1)
return theta0, theta1
def ENRegression(X, y):
theta0 = np.random.rand()
theta1 = np.random.rand()
for i in range(0, 1000):
if i % 100 == 0:
plotLine(theta0, theta1, X, y)
print(EN_cost(theta0, theta1, X, y))
theta0, theta1 = updateParameters(theta0, theta1, X, y, 0.005)
ENRegression(X, y)
[306402.60804692]
[111852.97684341]
[40919.30139496]
[15001.58623]
[5512.95154434]
[2032.66741257]
[753.95738261]
[283.39195684]
[109.96969251]
[45.97037289]
댓글남기기