Coverage report (coverage.py v7.5.3, created 2025-03-06 04:00 +0000) for /builds/debichem-team/python-ase/ase/optimize/bfgslinesearch.py: 84.06% of 138 statements
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************

import time
from typing import IO, Optional, Union

import numpy as np
from numpy import absolute, eye, isinf, sqrt

from ase import Atoms
from ase.optimize.optimize import Optimizer
from ase.utils.linesearch import LineSearch

# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core

# Modified from scipy_optimize
abs = absolute
pymin = min
pymax = max
__version__ = '0.1'


class BFGSLineSearch(Optimizer):
    def __init__(
        self,
        atoms: Atoms,
        restart: Optional[str] = None,
        logfile: Union[IO, str] = '-',
        maxstep: Optional[float] = None,
        trajectory: Optional[str] = None,
        c1: float = 0.23,
        c2: float = 0.46,
        alpha: float = 10.0,
        stpmax: float = 50.0,
        **kwargs,
    ):
        """Optimize atomic positions with the BFGSLineSearch algorithm,
        which uses both forces and potential energy information.

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        restart: str
            JSON file used to store the Hessian matrix. If set and a file
            with that name exists, the stored Hessian matrix will be read
            and reused.

        trajectory: str
            Trajectory file used to store the optimisation path.

        maxstep: float
            Maximum distance an atom can move per iteration
            (default value is 0.2 Angstrom).

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        c1: float
            Parameter for the sufficient-decrease (Armijo) condition of
            the line search.

        c2: float
            Parameter for the curvature (Wolfe) condition of the line
            search.

        alpha: float
            Initial guess for the Hessian (curvature of the energy
            surface), used to scale the problem.

        stpmax: float
            Maximum step size allowed in the line search.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.
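
        Example
        -------
        A minimal usage sketch; the EMT calculator and the rattled copper
        cell below are only illustrative choices::

            from ase.build import bulk
            from ase.calculators.emt import EMT
            from ase.optimize import BFGSLineSearch

            atoms = bulk('Cu', cubic=True)
            atoms.rattle(0.05)
            atoms.calc = EMT()
            BFGSLineSearch(atoms, logfile=None).run(fmax=0.05)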
        """
        if maxstep is None:
            self.maxstep = self.defaults['maxstep']
        else:
            self.maxstep = maxstep
        self.stpmax = stpmax
        self.alpha = alpha
        self.H = None
        self.c1 = c1
        self.c2 = c2
        self.force_calls = 0
        self.function_calls = 0
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.load_restart = False
        self.task = 'START'
        self.rep_count = 0
        self.p = None
        self.alpha_k = None
        self.no_update = False
        self.replay = False

        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)

    def read(self):
        self.r0, self.g0, self.e0, self.task, self.H = self.load()
        self.load_restart = True

    def reset(self):
        self.H = None
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.rep_count = 0

    def step(self, forces=None):
        optimizable = self.optimizable

        if forces is None:
            forces = optimizable.get_forces()

        if optimizable.is_neb():
            raise TypeError('NEB calculations cannot use the BFGSLineSearch'
                            ' optimizer. Use BFGS or another optimizer.')
        r = optimizable.get_positions()
        r = r.reshape(-1)
        g = -forces.reshape(-1) / self.alpha
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        # o,v = np.linalg.eigh(self.B)
        e = self.func(r)

        # Quasi-Newton search direction from the current approximation
        # of the inverse Hessian.
        self.p = -np.dot(self.H, g)
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(optimizable) * 1e-10):
            # Rescale a vanishing search direction to a small finite norm.
            self.p /= (p_size / np.sqrt(len(optimizable) * 1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                            maxstep=self.maxstep, c1=self.c1,
                            c2=self.c2, stpmax=self.stpmax)
        if self.alpha_k is None:
            raise RuntimeError("LineSearch failed!")

        dr = self.alpha_k * self.p
        optimizable.set_positions((r + dr).reshape(len(optimizable), -1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))

    def update(self, r, g, r0, g0, p0):
        self.I = eye(len(self.optimizable) * 3, dtype=int)
        if self.H is None:
            self.H = eye(3 * len(self.optimizable))
            # self.B = np.linalg.inv(self.H)
            return
        else:
            dr = r - r0
            dg = g - g0
            # self.alpha_k can be None!!!
            if not (((self.alpha_k or 0) > 0 and
                     abs(np.dot(g, p0)) - abs(np.dot(g0, p0)) < 0) or
                    self.replay):
                return
            if self.no_update is True:
                print('skip update')
                return

            try:  # this was handled in numeric, let it remain for more safety
                rhok = 1.0 / (np.dot(dg, dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")
            if isinf(rhok):  # this is patch for np
                rhok = 1000.0
                print("Divide-by-zero encountered: rhok assumed large")
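            # The lines below apply the standard BFGS update of the
            # inverse-Hessian approximation (dr = step, dg = gradient change):
            #   H_new = (I - rhok * dr dg^T) H (I - rhok * dg dr^T)
            #           + rhok * dr dr^T,   with rhok = 1 / (dg . dr)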
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            self.H = (np.dot(A1, np.dot(self.H, A2)) +
                      rhok * dr[:, np.newaxis] * dr[np.newaxis, :])
            # self.B = np.linalg.inv(self.H)

    def func(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_potential_energy() / self.alpha

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.force_calls += 1
        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        forces = self.optimizable.get_forces().reshape(-1)
        return - forces / self.alpha
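
    # Note: dividing the energy and its gradient by alpha in func/fprime
    # above is equivalent to starting the quasi-Newton iteration with an
    # initial Hessian guess of alpha * I for the unscaled problem, while the
    # scaled problem starts from the identity.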

    def replay_trajectory(self, traj):
        """Initialize hessian from old trajectory."""
        self.replay = True
        from ase.utils import IOContext

        with IOContext() as files:
            if isinstance(traj, str):
                from ase.io.trajectory import Trajectory
                traj = files.closelater(Trajectory(traj, mode='r'))

            r0 = None
            g0 = None
            for i in range(len(traj) - 1):
                r = traj[i].get_positions().ravel()
                g = - traj[i].get_forces().ravel() / self.alpha
                self.update(r, g, r0, g0, self.p)
                self.p = -np.dot(self.H, g)
                r0 = r.copy()
                g0 = g.copy()
            self.r0 = r0
            self.g0 = g0
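
    # A minimal sketch of warm-starting the Hessian from an earlier run via
    # replay_trajectory (the file name 'previous.traj' is hypothetical):
    #
    #     opt = BFGSLineSearch(atoms)
    #     opt.replay_trajectory('previous.traj')
    #     opt.run(fmax=0.05)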

    def log(self, forces=None):
        if self.logfile is None:
            return
        if forces is None:
            forces = self.optimizable.get_forces()
        fmax = sqrt((forces**2).sum(axis=1).max())
        e = self.optimizable.get_potential_energy()
        T = time.localtime()
        name = self.__class__.__name__
        w = self.logfile.write
        if self.nsteps == 0:
            w('%s %4s[%3s] %8s %15s %12s\n' %
              (' ' * len(name), 'Step', 'FC', 'Time', 'Energy', 'fmax'))
        w('%s: %3d[%3d] %02d:%02d:%02d %15.6f %12.4f\n'
          % (name, self.nsteps, self.force_calls, T[3], T[4], T[5], e,
             fmax))
        self.logfile.flush()


def wrap_function(function, args):
    ncalls = [0]

    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper
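

# A minimal sketch of wrap_function usage (the quadratic below is purely
# illustrative): it returns a one-element call counter plus the wrapped
# callable.
#
#     ncalls, f = wrap_function(lambda x, a: a * x ** 2, (2.0,))
#     f(3.0)       # 18.0
#     ncalls[0]    # 1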