import numpy as np
import scipy.optimize as opt
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Only the call to the SciPy optimizer itself still needs to be
    supplied by subclasses.
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, alpha=70.0, master=None,
                 force_consistent=None):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Pickle file used to store the trajectory of atomic movement.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Should the callback be run after each force call (also in the
            linesearch)?

        alpha: float
            Initial guess for the Hessian (curvature of the energy surface).
            A conservative value of 70.0 is the default, but fewer steps may
            be needed to converge if a lower value is used. However, a lower
            value also increases the risk of instability.

        master: boolean
            Defaults to None, which causes only rank 0 to save files. If
            set to True, this rank will save files.

        force_consistent: boolean or None
            Use force-consistent energy calls (as opposed to the energy
            extrapolated to 0 K). By default (force_consistent=None) uses
            force-consistent energies if available in the calculator, but
            falls back to force_consistent=False if not.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return (self.atoms.get_potential_energy(
            force_consistent=self.force_consistent) / self.H0)
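
    # Why the division by H0: SciPy's quasi-Newton routines start from the
    # identity matrix as the initial Hessian. Dividing the energy here (and
    # the forces in fprime(), and the convergence threshold in run()) by H0
    # rescales the problem so that the expected curvature is of order one,
    # which gives the minimizer a reasonably sized first step.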

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.atoms.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback`() can raise a :exc:`Converged` exception to signal
        that the optimization is complete. This will be silently ignored by
        :meth:`run`().
        """
        f = self.atoms.get_forces()
        self.log(f)
        self.call_observers()
        if self.converged(f):
            raise Converged
        self.nsteps += 1

    def run(self, fmax=0.05, steps=100000000):
        if self.force_consistent is None:
            self.set_force_consistent()
        self.fmax = fmax
        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            self.callback(None)
            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
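

# The two concrete drivers below supply call_fmin() via SciPy's classic
# fmin_cg/fmin_bfgs interfaces. Note that gtol is set well below the scaled
# fmax on purpose: convergence is detected in callback() via the Converged
# exception, so SciPy's own criterion should never trigger.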
class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
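

# Example usage of the gradient-based drivers above (a sketch, not part of
# this module: the EMT calculator and Cu cell are illustrative choices;
# SciPyFminCG is used the same way, only the SciPy routine differs):
#
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#
#     atoms = bulk('Cu', cubic=True)
#     atoms.rattle(0.1)            # perturb so there is something to relax
#     atoms.set_calculator(EMT())
#     SciPyFminBFGS(atoms, logfile='-').run(fmax=0.05)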


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-less SciPy optimizers

    Only the call to the SciPy optimizer itself still needs to be
    supplied by subclasses.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, master=None,
                 force_consistent=None):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Pickle file used to store the trajectory of atomic movement.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Should the callback be run after each function call (also in the
            linesearch)?

        master: boolean
            Defaults to None, which causes only rank 0 to save files. If
            set to True, this rank will save files.

        force_consistent: boolean or None
            Use force-consistent energy calls (as opposed to the energy
            extrapolated to 0 K). By default (force_consistent=None) uses
            force-consistent energies if available in the calculator, but
            falls back to force_consistent=False if not.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        # No H0 scaling here: gradient-less methods build no quasi-Newton
        # Hessian, so the raw energy is returned.
        return self.atoms.get_potential_energy(
            force_consistent=self.force_consistent)

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.atoms.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        if self.force_consistent is None:
            self.set_force_consistent()
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            Initial step size used along each search direction.
            Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
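

# Minimal runnable demo of the gradient-less drivers (a sketch; the EMT
# calculator and the N2 molecule are illustrative choices, not part of this
# module's API).
if __name__ == '__main__':
    from ase.build import molecule
    from ase.calculators.emt import EMT

    demo_atoms = molecule('N2')
    demo_atoms.set_calculator(EMT())
    # Nelder-Mead needs only energies, never forces.
    SciPyFmin(demo_atoms, logfile='-').run(xtol=0.001, ftol=0.001)
    # Powell's method, with a smaller initial step along each direction.
    SciPyFminPowell(demo_atoms, direc=0.01).run(xtol=0.001, ftol=0.001)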