Coverage for /builds/debichem-team/python-ase/ase/optimize/sciopt.py: 68.22%

107 statements  

coverage.py v7.5.3, created at 2025-03-06 04:00 +0000

from typing import IO, Optional, Union

import numpy as np
import scipy.optimize as opt

from ase import Atoms
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Only the call to the SciPy optimizer itself (:meth:`call_fmin`) needs to
    be implemented by subclasses.
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        alpha: float = 70.0,
        **kwargs,
    ):
        """Initialize object

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        trajectory: str
            Trajectory file used to store optimisation path.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Should the callback be run after each force call (also in the
            linesearch)?

        alpha: float
            Initial guess for the Hessian (curvature of the energy surface).
            A conservative value of 70.0 is the default, but fewer steps may
            be needed to converge if a lower value is used. However, a lower
            value also increases the risk of instability.

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.

        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
        self.max_steps = 0

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_potential_energy() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.optimizable.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback`() can raise a :exc:`Converged` exception to signal
        that the optimisation is complete. This will be silently ignored by
        :meth:`run`().
        """
        if self.nsteps < self.max_steps:
            self.nsteps += 1
            f = self.optimizable.get_forces()
            self.log(f)
            self.call_observers()
            if self.converged(f):
                raise Converged

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax

        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            if self.nsteps == 0:
                self.log()
                self.call_observers()

            self.max_steps = steps + self.nsteps

            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
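

# Illustrative sketch (not part of the original module): the class docstring
# above says subclasses only need to provide ``call_fmin``. A hypothetical
# subclass could, for example, wrap ``scipy.optimize.minimize`` with the
# L-BFGS-B method instead of the legacy ``fmin_*`` wrappers. The class name
# and the option values below are assumptions for illustration only.
class SciPyMinimizeLBFGSB(SciPyOptimizer):
    """Hypothetical example subclass using ``scipy.optimize.minimize``."""

    def call_fmin(self, fmax, steps):
        # ``self.f`` and ``self.fprime`` are already scaled by 1 / H0, and
        # ``fmax`` arrives pre-scaled from ``run``, mirroring the subclasses
        # below.  Convergence is normally signalled by ``self.callback``
        # raising ``Converged`` before ``gtol`` is ever reached.
        opt.minimize(self.f,
                     self.x0(),
                     jac=self.fprime,
                     method='L-BFGS-B',
                     callback=self.callback,
                     options={'maxiter': steps,
                              'gtol': fmax * 0.1})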


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
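

# Minimal usage sketch (not part of the original module): relax a rattled
# copper cell with the EMT calculator using ``SciPyFminBFGS``. The structure,
# file name and parameter values are arbitrary choices for illustration.
def _example_bfgs_relaxation():
    from ase.build import bulk
    from ase.calculators.emt import EMT

    atoms = bulk('Cu', cubic=True).repeat((2, 2, 2))
    atoms.rattle(stdev=0.1, seed=42)  # push atoms away from equilibrium
    atoms.calc = EMT()

    # alpha is the initial Hessian guess; the trajectory records every step.
    relax = SciPyFminBFGS(atoms, alpha=70.0, trajectory='Cu_relax.traj')
    converged = relax.run(fmax=0.05, steps=100)
    return converged, relax.force_calls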


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers

    Only the call to the SciPy optimizer itself (:meth:`call_fmin`) needs to
    be implemented by subclasses.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        **kwargs,
    ):
        """Initialize object

        Parameters
        ----------
        atoms: :class:`~ase.Atoms`
            The Atoms object to relax.

        trajectory: str
            Trajectory file used to store optimisation path.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Should the callback be run after each force call (also in the
            linesearch)?

        kwargs : dict, optional
            Extra arguments passed to
            :class:`~ase.optimize.optimize.Optimizer`.

        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory, **kwargs)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.optimizable.get_potential_energy()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.optimizable.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            Initial step size along each search direction. Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
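

# Minimal usage sketch (not part of the original module): the gradient-free
# optimizers converge on parameter and function-value tolerances (``xtol``,
# ``ftol``) rather than ``fmax``. The structure and values below are
# arbitrary choices for illustration; ``direc`` sets the size of the initial
# search directions (default 0.04).
def _example_powell_relaxation():
    from ase.build import bulk
    from ase.calculators.emt import EMT

    atoms = bulk('Cu', cubic=True).repeat((2, 2, 2))
    atoms.rattle(stdev=0.1, seed=1)
    atoms.calc = EMT()

    relax = SciPyFminPowell(atoms, logfile='-', direc=0.04)
    relax.run(xtol=0.001, ftol=0.0001, steps=200)
    return relax.function_calls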