Coverage for /builds/debichem-team/python-ase/ase/optimize/sciopt.py: 68.22%
107 statements
coverage.py v7.5.3, created at 2025-03-06 04:00 +0000

from typing import IO, Optional, Union

import numpy as np
import scipy.optimize as opt

from ase import Atoms
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Only the call to the optimizer itself needs to be implemented by
    subclasses.
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        alpha: float = 70.0,
        **kwargs,
    ):
        """Initialize object

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        trajectory: str
            Trajectory file used to store the optimisation path.

        callback_always: bool
            Whether the callback should be run after each force call (also
            within the line search).

        alpha: float
            Initial guess for the Hessian (curvature of the energy surface).
            The default of 70.0 is conservative; a lower value may reduce the
            number of steps needed to converge, but also increases the risk
            of instability.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
        self.max_steps = 0

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use by the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_potential_energy() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use by the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.optimizable.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback` can raise a :exc:`Converged` exception to signal
        that the optimisation is complete; it is caught and silently ignored
        by :meth:`run`.
        """
        if self.nsteps < self.max_steps:
            self.nsteps += 1
        f = self.optimizable.get_forces()
        self.log(f)
        self.call_observers()
        if self.converged(f):
            raise Converged

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax

        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            if self.nsteps == 0:
                self.log()
                self.call_observers()

            self.max_steps = steps + self.nsteps

            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
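
# A minimal sketch (hypothetical, not part of ASE) of how a further subclass
# could plug another SciPy routine into this interface: only ``call_fmin``
# needs to be supplied, reusing ``f``, ``fprime``, ``x0`` and ``callback``
# from ``SciPyOptimizer``.  The class name and the choice of
# ``scipy.optimize.minimize`` with L-BFGS-B are illustrative assumptions.
#
#     class SciPyMinimizeLBFGSB(SciPyOptimizer):
#         def call_fmin(self, fmax, steps):
#             opt.minimize(
#                 self.f,
#                 self.x0(),
#                 jac=self.fprime,
#                 method='L-BFGS-B',
#                 callback=self.callback,
#                 options={'maxiter': steps, 'gtol': fmax * 0.1},
#             )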


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
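
# Usage sketch (assumptions: the EMT calculator and a rattled Cu cell are
# only examples; any Atoms object with an attached calculator will do, and
# 'cu.traj' is an arbitrary file name):
#
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#
#     atoms = bulk('Cu', cubic=True)
#     atoms.rattle(stdev=0.1, seed=1)
#     atoms.calc = EMT()
#     SciPyFminBFGS(atoms, trajectory='cu.traj').run(fmax=0.05)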


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers

    Only the call to the optimizer itself needs to be implemented by
    subclasses.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        **kwargs,
    ):
        """Initialize object

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        trajectory: str
            Trajectory file used to store the optimisation path.

        callback_always: bool
            Whether the callback should be run after each function call (also
            within the line search).

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use by the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.optimizable.get_potential_energy()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.optimizable.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)
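
# Sketch of the note in ``SciPyGradientlessOptimizer``'s docstring
# (hypothetical subclass, not ASE API): redefining ``f`` lets the
# gradient-free drivers minimise an arbitrary objective.  Here the potential
# energy is augmented by a made-up harmonic restraint pulling atom 0 towards
# the origin; the spring constant of 10.0 eV/Ang^2 is an arbitrary assumption.
#
#     class RestrainedFmin(SciPyFmin):
#         def f(self, x):
#             self.optimizable.set_positions(x.reshape(-1, 3))
#             self.function_calls += 1
#             penalty = 0.5 * 10.0 * np.sum(x[:3] ** 2)
#             return self.optimizable.get_potential_energy() + penalty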


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            How much to displace x initially. Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
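
# Usage sketch for the gradient-free drivers (assumptions as in the BFGS
# example above: EMT and a rattled Cu cell stand in for any calculator and
# Atoms object; 'opt.traj' is an arbitrary file name).  ``direc`` sets the
# size of the initial displacement along each coordinate:
#
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#
#     atoms = bulk('Cu', cubic=True)
#     atoms.rattle(stdev=0.1, seed=1)
#     atoms.calc = EMT()
#     SciPyFminPowell(atoms, direc=0.01, trajectory='opt.traj').run(
#         xtol=0.001, ftol=0.001)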