Commit 34fec21
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Jun 30, 2023
1 parent 80d9fbf commit 34fec21
Showing 1 changed file with 22 additions and 20 deletions.
implementation/artificial_intelligence/lasso_regession.py (42 changes: 22 additions & 20 deletions)
@@ -1,28 +1,28 @@
 import numpy as np

 # reference:
 # https://stats.stackexchange.com/questions/\
 # 502376/\
 # what-is-gradient-of-the-objective-function-of-lasso-regression


-def costFunction(X_data: np.array, y_data: np.array,
-                 alpha: float,
-                 theta: np.ndarray,
-                 panjang_data: int) -> float:
-
+def costFunction(
+    X_data: np.array,
+    y_data: np.array,
+    alpha: float,
+    theta: np.ndarray,
+    panjang_data: int,
+) -> float:
     prod = np.dot(theta, X_data)
     prod = prod - y_data
-    result = np.sum(prod ** 2) + (alpha * np.sum(np.abs(theta.T)))
+    result = np.sum(prod**2) + (alpha * np.sum(np.abs(theta.T)))
     result = (result) / (2 * panjang_data)
     return result


-def gradient_descent_step(X_data: np.array,
-                          y_data: np.array,
-                          alpha: float,
-                          theta: np.array,
-                          panjang_data: int) -> float:
-
+def gradient_descent_step(
+    X_data: np.array, y_data: np.array, alpha: float, theta: np.array, panjang_data: int
+) -> float:
     prod = np.dot(theta, X_data)
     prod = prod - y_data
     sum_grad = np.dot(prod, X_data.T) + alpha * np.sign(theta.T)
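
For readers following the Stack Exchange reference above: with X_data laid out feature-major (shape n_features x n_samples, so that np.dot(theta, X_data) yields one prediction per sample), the two helpers appear to implement the L1-penalized least-squares objective and its subgradient, sketched here in LaTeX with n standing for panjang_data:

    J(\theta) = \frac{1}{2n}\left(\lVert \theta X - y \rVert_2^2 + \alpha \lVert \theta \rVert_1\right),
    \qquad
    \nabla_\theta J \propto (\theta X - y)\,X^\top + \alpha \operatorname{sign}(\theta)

The 1/(2n) scaling on the gradient side is presumably applied in the update lines hidden below this hunk, or absorbed into the learning rate.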
@@ -31,11 +31,13 @@ def gradient_descent_step(X_data: np.array,


 class Lasso:
-    def __init__(self,
-                 aplha: float = 1.0,
-                 fit_intercept: bool = True,
-                 iterable: int = 1000,
-                 learning_path: float = .0001) -> None:
+    def __init__(
+        self,
+        aplha: float = 1.0,
+        fit_intercept: bool = True,
+        iterable: int = 1000,
+        learning_path: float = 0.0001,
+    ) -> None:
         """_summary_
         lasso regression is an alternative regularization of linear
         regression modeling that aims to make the regression model
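
For orientation, since the attribute assignments are collapsed out of this diff: aplha (typo preserved from the source) is the L1 penalty strength later read back as self.alpha, iterable is the number of gradient-descent iterations, and learning_path is presumably the learning rate. A hypothetical construction, with x_train and y_train as placeholder arrays:

# Hypothetical usage; keyword names are copied verbatim from the diff,
# including the "aplha" spelling.
model = Lasso(aplha=0.1, fit_intercept=True, iterable=1000, learning_path=0.0001)
theta = model.fit(x_train, y_train)  # fit() returns the learned theta (see the hunk below)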
@@ -68,9 +70,8 @@ def fit(self, x: np.array, y: np.array) -> object:
         Y = np.array(y)
         theta = np.zeros(panjang_fitur)
         for _ in range(self.iterable):
-            theta = gradient_descent_step(
-                X, Y, self.alpha, theta, panjang_data)
-            costFunction(X, Y, self.alpha , theta , panjang_data)
+            theta = gradient_descent_step(X, Y, self.alpha, theta, panjang_data)
+            costFunction(X, Y, self.alpha, theta, panjang_data)
         return theta

     def transforms(self, x: np.array) -> np.array:
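
Note that the loop calls costFunction on every iteration but discards the return value, so as written it does no work; tracking the cost is the obvious intended use. Below is a self-contained sketch of the same loop on synthetic data, assuming (as above) a feature-major X and that the hidden tail of gradient_descent_step scales by panjang_data and steps by the learning rate:

import numpy as np

rng = np.random.default_rng(0)
n_features, n_samples = 3, 50
X = rng.normal(size=(n_features, n_samples))    # feature-major layout, as in the diff
true_theta = np.array([2.0, 0.0, -1.0])         # sparse ground truth
y = true_theta @ X + rng.normal(scale=0.1, size=n_samples)

alpha, lr, n_iter = 0.1, 0.01, 1000
theta = np.zeros(n_features)
for _ in range(n_iter):
    residual = theta @ X - y                         # "prod" in the diff
    grad = residual @ X.T + alpha * np.sign(theta)   # "sum_grad" in the diff
    theta -= lr * grad / n_samples                   # assumed form of the hidden update
    cost = (np.sum(residual**2) + alpha * np.sum(np.abs(theta))) / (2 * n_samples)

print(theta)  # approaches [2, 0, -1], with the middle coefficient shrunk toward zero
print(cost)   # the per-iteration cost the original computes and throws away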
@@ -95,4 +96,5 @@ def main():

 if __name__ == "__main__":
     import doctest
+
     doctest.testmod(verbose=True)
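
As a sanity check on the hand-rolled optimizer, one could compare against scikit-learn's Lasso, which the module itself does not use. Conventions differ: sklearn expects X as (n_samples, n_features) and minimizes (1/(2n)) * ||y - Xw||^2 + alpha * ||w||_1, i.e. without the 1/(2n) factor that costFunction also applies to the penalty term, so coefficients agree only approximately:

import numpy as np
from sklearn.linear_model import Lasso as SkLasso

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))                  # samples-major, as sklearn expects
w_true = np.array([2.0, 0.0, -1.0])
y = X @ w_true + rng.normal(scale=0.1, size=50)

model = SkLasso(alpha=0.1, fit_intercept=False)
model.fit(X, y)
print(model.coef_)  # the truly-zero coefficient should be driven to exactly 0.0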
