-
Notifications
You must be signed in to change notification settings - Fork 2
/
run_nonconvex_opt.py
131 lines (93 loc) · 3.3 KB
/
run_nonconvex_opt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
"""
Continuation Path Learning (CPL) for Non-convex Optimization
"""
import torch
from problem import get_problem
from model import ParetoSetModel
import timeit
# ---------------------------------------------------------------------------
# Experiment configuration
# ---------------------------------------------------------------------------
# device used for the model and all tensors
device = 'cpu'
# number of independent runs
n_run = 20
# instance name, from ['ackley', 'himmelblau', 'rosenbrock']
test_ins = 'ackley'

# Per-instance hyperparameters:
#   n_steps        -- number of continuation-path learning steps
#   n_levels       -- number of homotopy levels sampled at each step
#   n_local_search -- number of extra local-search steps at homotopy level t = 1
#   init_point     -- initial solution the model is warmed up to reproduce
_INSTANCE_SETTINGS = {
    'ackley':     {'n_steps': 225,  'n_levels': 4, 'n_local_search': 100,  'init_point': [5, 5]},
    'himmelblau': {'n_steps': 450,  'n_levels': 4, 'n_local_search': 200,  'init_point': [-3, -2]},
    'rosenbrock': {'n_steps': 4500, 'n_levels': 4, 'n_local_search': 2000, 'init_point': [-3, 2]},
}

if test_ins not in _INSTANCE_SETTINGS:
    # Fail fast with a clear message; the original if-chain would silently
    # fall through and crash later with a NameError on n_steps.
    raise ValueError(f"unknown test instance: {test_ins!r}; "
                     f"choose one of {sorted(_INSTANCE_SETTINGS)}")

_cfg = _INSTANCE_SETTINGS[test_ins]
# number of learning steps
n_steps = _cfg['n_steps']
# number of sampled homotopy levels at each step
n_levels = _cfg['n_levels']
# number of local search steps
n_local_search = _cfg['n_local_search']
# initial solution (integer list -> int64 tensor, same dtype as the original)
init_point = torch.tensor(_cfg['init_point']).to(device)
# Problem setup: the instance object supplies dimensionality and an
# evaluate(x, t) -> (value, grad) interface used for the model update.
problem = get_problem(test_ins)
n_dim = problem.n_dim

# Final objective value of each independent run, stored as Python floats
# so no autograd graph is kept alive across runs.
value_list = []

# repeatedly run the algorithm n_run times
for run_iter in range(n_run):

    # model initialization: maps a homotopy level t to a solution x
    psmodel = ParetoSetModel(n_dim)
    psmodel.to(device)
    psmodel.train()

    # Warm-up: train the model so that its output at t = 1 matches the same
    # initial solution used by the other homotopy methods (fair comparison).
    optimizer = torch.optim.Adam(psmodel.parameters(), lr=1e-4)
    for _ in range(1000):
        t = torch.ones([1, 1]).to(device)
        x = psmodel(t)
        # L1 distance between the model output and the required start point
        loss = torch.abs(x - init_point).sum()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # path model training
    start = timeit.default_timer()
    optimizer = torch.optim.Adam(psmodel.parameters(), lr=5e-3)
    for t_step in range(n_steps):
        # sample n_levels homotopy levels; always include the target level t = 1
        t = torch.rand([n_levels, 1]).to(device)
        t[0] = 1
        # get the current corresponding solutions and gradients
        x = psmodel(t)
        value, grad = problem.evaluate(x, t)
        # gradient-based continuation path model update: backprop the
        # problem-supplied gradient through the model output
        optimizer.zero_grad()
        psmodel(t).backward(grad)
        optimizer.step()

    # optional local search at homotopy level t = 1 (the original problem)
    t = torch.ones([1, 1]).to(device)
    for local_step in range(n_local_search):
        x = psmodel(t)
        value, grad = problem.evaluate(x, t)
        optimizer.zero_grad()
        psmodel(t).backward(grad)
        optimizer.step()

    # print the final solution with homotopy level t = 1
    t = torch.ones([1, 1]).to(device)
    x = psmodel(t)
    value, grad = problem.evaluate(x, t)

    print('Run', run_iter + 1)
    print('Solution:', x)
    print('Value:', value.item())
    # store a detached scalar (the original appended the live tensor, which
    # retains the computation graph and breaks torch.tensor() on grad tensors)
    value_list.append(value.item())

    stop = timeit.default_timer()
    print('Time: ', stop - start)
    print("************************************************************")

# average final objective value over all independent runs
avg_value = torch.mean(torch.tensor(value_list))
print('Average Value: ', avg_value.item())