path: root/mlplib/optimize_test.py
"""
# -*- coding: utf-8 -*-
#
# Copyright 2021 Michael Büsch <m@bues.ch>
#
# Licensed under the Apache License version 2.0
# or the MIT license, at your option.
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
"""

from mlplib.activation import *
from mlplib.backward import *
from mlplib.forward import *
from mlplib.init import *
from mlplib.loss import *
from mlplib.optimize import *
from mlplib.parameters import *
from mlplib.util import *
import numpy as np

def make_net():
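    """Build a small deterministic test network: 4 inputs, hidden layers of
    6 and 9 ReLU units, and a 2-unit sigmoid output layer (seeded RNG)."""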
    seed(42)
    nr_inputs = 4
    layout = (6, 9, 2)
    params = Parameters(
        weights=init_layers_weights(nr_inputs, layout),
        biases=init_layers_biases(layout),
        actvns=[
            ReLU(),
            ReLU(),
            Sigmoid(),
        ],
    )
    return params

def run_net(params, optimizer, decay=False):
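    """Train params on fixed random data for 100 steps and assert that the
    loss strictly decreases on every step. With decay=True, also assert
    that the optimizer's learning rate alpha strictly decreases."""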
    x = standard_normal((20, params.nr_inputs))
    y = standard_normal((20, params.layout[-1]))
    lossfn = MSE()
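    # Sentinels set high enough that the first comparisons in the loop pass.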
    prev_loss = 9999.0
    prev_alpha = optimizer.alpha + 9999.0
    for i in range(100):
        gradients, yh = backward_prop(x, y, params, lossfn)  # yh is the forward prediction

        new_loss = lossfn.fn(yh, y)
        assert new_loss < prev_loss
        prev_loss = new_loss

        optimizer.apply(i, gradients)  # apply this step's gradients (i is the step index)

        if decay:
            # A decaying optimizer wrapper must strictly shrink alpha every step.
            assert optimizer.alpha < prev_alpha
            prev_alpha = optimizer.alpha

def test_gradient_descent():
    params = make_net()
    optimizer = GradDescent(params=params, alpha=0.1)
    run_net(params, optimizer)

def test_momentum():
    params = make_net()
    optimizer = Momentum(params=params, alpha=0.1, beta=0.9)
    run_net(params, optimizer)

def test_rms_prop():
    params = make_net()
    optimizer = RMSProp(params=params, alpha=0.01, beta=0.9)
    run_net(params, optimizer)

def test_adam():
    params = make_net()
    optimizer = Adam(params=params, alpha=0.01, beta1=0.9, beta2=0.9)
    run_net(params, optimizer)

def test_decay_simple():
    params = make_net()
    optimizer = AlphaDecaySimple(
        Adam(params=params, alpha=0.01, beta1=0.9, beta2=0.9),
        decay_rate=0.15)
    run_net(params, optimizer, decay=True)

def test_decay_exp():
    params = make_net()
    optimizer = AlphaDecayExp(
        Adam(params=params, alpha=0.01, beta1=0.9, beta2=0.9),
        decay_rate=0.15)
    run_net(params, optimizer, decay=True)

def test_decay_sqrt():
    params = make_net()
    optimizer = AlphaDecaySqrt(
        Adam(params=params, alpha=0.01, beta1=0.9, beta2=0.9),
        decay_rate=0.15)
    run_net(params, optimizer, decay=True)

# vim: ts=4 sw=4 expandtab