F102776422: old_functions.py
Attached To: rTZUCT ML_Project1
Created: Mon, Feb 24, 02:10 · Size: 4 KB · Mime Type: text/x-python
import datetime
import numpy as np
import matplotlib.pyplot as plt
from proj1_helpers import *
def compute_mse(y, tx, w):
    """Compute the mean squared error loss (divided by 2)."""
    return np.mean((y - tx @ w) ** 2) / 2
def least_squares(y, tx):
    """calculate the least squares solution."""
    # Solve the normal equations (tx^T tx) w = tx^T y via the pseudo-inverse.
    X = np.transpose(tx) @ tx
    Y = np.transpose(tx) @ y
    w = np.linalg.pinv(X) @ Y
    mse = np.mean((y - tx @ w) ** 2) / 2
    return mse, w
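# Illustrative sanity check (not part of the original file): on noise-free
# synthetic data the normal-equations solution should recover the generating
# weights exactly. The shapes below are assumptions: y is (N,) and tx is (N, D)
# with a leading all-ones bias column.
def _demo_least_squares():
    rng = np.random.default_rng(0)
    tx_demo = np.c_[np.ones(100), rng.normal(size=100)]
    w_true = np.array([1.5, -3.0])
    y_demo = tx_demo @ w_true
    mse_demo, w_demo = least_squares(y_demo, tx_demo)
    assert np.allclose(w_demo, w_true)
    return mse_demo, w_demo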
def compute_mse_gradient(y, tx, w):
    """Compute the gradient."""
    # Broadcast the residual (y - tx @ w) across every column of tx, then
    # average column-wise; this is -tx.T @ (y - tx @ w) / N written element-wise.
    gradient_helper = np.transpose([(y - tx @ w)] * np.shape(tx)[1])
    return np.sum(gradient_helper * (-tx), axis=0) / len(y)
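# A minimal vectorized equivalent of compute_mse_gradient (added for comparison,
# not from the original file; kept separate so the original stays untouched):
def compute_mse_gradient_vectorized(y, tx, w):
    """Gradient of the MSE/2 loss: -tx.T @ (y - tx @ w) / N."""
    return -tx.T @ (y - tx @ w) / len(y)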
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
    """Gradient descent algorithm."""
    # Define parameters to store w and loss
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_iters):
        # Gradient and loss at the current w
        gradient = compute_mse_gradient(y, tx, w)
        loss = compute_mse(y, tx, w)
        # Take one gradient step
        w = w - gamma * gradient
        # Store the iterate and its loss
        ws.append(w)
        losses.append(loss)
        print("Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(
            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))
    return losses, ws
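# Minimal usage sketch for least_squares_GD (illustrative only; the step size,
# iteration count, noise level, and data shapes are assumptions, not values
# taken from the project):
def _demo_least_squares_GD():
    rng = np.random.default_rng(1)
    tx_demo = np.c_[np.ones(200), rng.normal(size=200)]
    y_demo = tx_demo @ np.array([0.5, 2.0]) + 0.1 * rng.normal(size=200)
    losses, ws = least_squares_GD(y_demo, tx_demo,
                                  initial_w=np.zeros(2),
                                  max_iters=50, gamma=0.1)
    return losses[-1], ws[-1]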
def least_squares_SGD(y, tx, initial_w, batch_size, max_iters, gamma):
    """Stochastic gradient descent algorithm."""
    ws = [initial_w]
    losses = []
    w = initial_w
    for n_iter in range(max_iters):
        for y_i, tx_i in batch_iter(y, tx, batch_size):
            # Gradient and loss on the current mini-batch only
            gradient = compute_mse_gradient(y_i, tx_i, w)
            loss = compute_mse(y_i, tx_i, w)
            w = w - gamma * gradient
            ws.append(w)
            losses.append(loss)
        print("Stochastic Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(
            bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))
    return losses, ws
def ridge_regression(y, tx, lambda_):
    """implement ridge regression."""
    L = np.identity(tx.shape[1])
    #L[0][0] = 0
    # Regularized normal equations: (tx^T tx + 2 N lambda_ I) w = tx^T y
    a = np.transpose(tx) @ tx + 2 * len(y) * lambda_ * L
    b = np.transpose(tx) @ y
    w = np.linalg.solve(a, b)
    return compute_mse(y, tx, w), w
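# Illustrative check (an addition, not part of the original file): with
# lambda_ = 0 the ridge solution should reduce to the unregularized
# least_squares solution on the same data.
def _demo_ridge_reduces_to_least_squares():
    rng = np.random.default_rng(2)
    tx_demo = np.c_[np.ones(50), rng.normal(size=(50, 3))]
    y_demo = rng.normal(size=50)
    _, w_ridge = ridge_regression(y_demo, tx_demo, lambda_=0.0)
    _, w_ls = least_squares(y_demo, tx_demo)
    assert np.allclose(w_ridge, w_ls)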
def sigmoid(t):
    return 1 / (1 + np.power(np.e, -t))
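# Note (an addition, not from the original file): np.power(np.e, -t) can
# overflow for large negative t. A numerically stable alternative might look
# like the sketch below; it assumes the rest of the code only needs standard
# element-wise sigmoid values.
def sigmoid_stable(t):
    """Sigmoid evaluated without overflow for large |t| (element-wise)."""
    t = np.asarray(t, dtype=float)
    # For t >= 0 use 1/(1+e^-t); for t < 0 use e^t/(1+e^t), avoiding e^{|t|}.
    exp_neg = np.exp(-np.abs(t))
    return np.where(t >= 0, 1 / (1 + exp_neg), exp_neg / (1 + exp_neg))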
def calculate_log_loss(y, tx, w):
    """compute the cost by negative log likelihood."""
    return np.sum(np.log(1 + np.power(np.e, tx @ w)) - y * tx @ w)
def calculate_log_gradient(y, tx, w):
    """compute the gradient of loss."""
    return tx.T @ (sigmoid(tx @ w) - y)
def calculate_hessian(y, tx, w):
    """return the hessian of the loss function."""
    # S is the diagonal matrix of sigma(tx @ w) * (1 - sigma(tx @ w))
    S = sigmoid(tx @ w) * (1 - sigmoid(tx @ w))
    S = np.identity(len(S)) * S
    return (tx.T @ S @ tx)
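# Memory note (an observation added here, not from the original file):
# np.identity(len(S)) * S materializes a dense N x N matrix. An equivalent
# sketch that scales the rows of tx directly avoids that allocation:
def calculate_hessian_diag_free(y, tx, w):
    """Hessian tx.T @ diag(s) @ tx computed without forming the N x N diagonal."""
    s = sigmoid(tx @ w) * (1 - sigmoid(tx @ w))
    return tx.T @ (tx * s[:, None])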
def logistic_gradient_descent(y, tx, w, gamma):
    """
    Do one step of gradient descent using logistic regression.
    Return the loss and the updated w.
    """
    # Loss and gradient at the current w, then one gradient step
    loss = calculate_log_loss(y, tx, w)
    gradient = calculate_log_gradient(y, tx, w)
    w = w - gamma * gradient
    return loss, w
def logistic_regression(y, tx, w):
    """return the loss, gradient, and hessian."""
    loss = calculate_log_loss(y, tx, w)
    gradient = calculate_log_gradient(y, tx, w)
    hessian = calculate_hessian(y, tx, w)
    return loss, gradient, hessian
def learning_by_newton_method(y, tx, w):
    """
    Do one step of Newton's method.
    Return the loss and updated w.
    """
    loss, gradient, hessian = logistic_regression(y, tx, w)
    # Newton step: w_new = w - H^{-1} grad (pinv used in case the hessian is singular)
    w_new = w - np.linalg.pinv(hessian) @ gradient
    return loss, w_new
def penalized_logistic_regression(y, tx, w, lambda_):
    """return the loss, gradient, and hessian."""
    # Note: the returned loss is the unpenalized negative log likelihood; the
    # L2 penalty enters only through the gradient and the hessian below.
    loss = np.sum(np.log(1 + np.exp(tx @ w)) - y * tx @ w)
    gradient = tx.T @ (sigmoid(tx @ w) - y) + lambda_ * w
    hessian = calculate_hessian(y, tx, w) + lambda_ * np.identity(len(w))
    return loss, gradient, hessian
def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):
    """Do one step of penalized logistic regression and return the loss and updated w."""
    loss, gradient, hessian = penalized_logistic_regression(y, tx, w, lambda_)
    # Newton-style update; the plain gradient step is kept below for reference.
    w_new = w - np.linalg.pinv(hessian) @ gradient
    #w-gamma*gradient
    return loss, w_new
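# End-to-end usage sketch (illustrative only; the synthetic data, labels in
# {0, 1}, step size, and penalty strength are assumptions rather than values
# from ML_Project1):
def _demo_penalized_logistic_regression():
    rng = np.random.default_rng(3)
    tx_demo = np.c_[np.ones(300), rng.normal(size=(300, 2))]
    w_true = np.array([-0.5, 2.0, -1.0])
    y_demo = (sigmoid(tx_demo @ w_true) > rng.uniform(size=300)).astype(float)
    w = np.zeros(3)
    for _ in range(20):
        loss, w = learning_by_penalized_gradient(y_demo, tx_demo, w,
                                                 gamma=0.1, lambda_=0.01)
    return loss, w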