import numpy as np

from ase.optimize.sciopt import SciPyOptimizer, OptimizerConvergenceError


def ode12r(f, X0, h=None, verbose=1, fmax=1e-6, maxtol=1e3, steps=100,
           rtol=1e-1, C1=1e-2, C2=2.0, hmin=1e-10, extrapolate=3,
           callback=None, apply_precon=None, converged=None, residual=None):
    """
    Adaptive ODE solver, which uses 1st and 2nd order approximations to
    estimate local error and choose a new step length.

    This optimizer is described in detail in:

        S. Makri, C. Ortner and J. R. Kermode, J. Chem. Phys.
        150, 094109 (2019)
        https://dx.doi.org/10.1063/1.5064465

    Parameters
    ----------

    f : function
        function returning driving force on system
    X0 : 1-dimensional array
        initial value of degrees of freedom
    h : float
        step size, if None an estimate is used based on ODE12
    verbose : int
        verbosity level. 0=no output, 1=log output (default), 2=debug output.
    fmax : float
        convergence tolerance for residual force
    maxtol : float
        terminate if residual exceeds this value
    steps : int
        maximum number of steps to take before giving up
    rtol : float
        relative tolerance
    C1 : float
        sufficient contraction parameter
    C2 : float
        residual growth control (Inf means there is no control)
    hmin : float
        minimal allowed step size
    extrapolate : int
        extrapolation style (3 seems the most robust)
    callback : function
        optional callback function to call after each update step
    apply_precon : function
        apply a preconditioner to the optimisation
    converged : function
        alternative function to check convergence, rather than
        using a simple norm of the forces.
    residual : function
        compute the residual from the current forces

    Returns
    -------

    X : array
        final value of degrees of freedom
    """

    X = X0
    Fn = f(X)

    if callback is None:
        def callback(X):
            pass
    callback(X)

    if residual is None:
        def residual(F, X):
            return np.linalg.norm(F, np.inf)
    Rn = residual(Fn, X)

    if apply_precon is None:
        def apply_precon(F, X):
            return F, residual(F, X)
    Fp, Rp = apply_precon(Fn, X)

    def log(*args):
        if verbose >= 1:
            print(*args)

    def debug(*args):
        if verbose >= 2:
            print(*args)

    if converged is None:
        def converged(F, X):
            return residual(F, X) <= fmax

    if converged(Fn, X):
        log("ODE12r terminates successfully after 0 iterations")
        return X
    if Rn >= maxtol:
        raise OptimizerConvergenceError(f"ODE12r: Residual {Rn} is too large "
                                        "at iteration 0")

    # Computation of the initial step
    r = residual(Fp, X)  # pick the biggest force
    if h is None:
        h = 0.5 * rtol ** 0.5 / r  # Choose a step size based on that force
        h = max(h, hmin)  # Make sure the step size is not too small

    for nit in range(1, steps):
        Xnew = X + h * Fp  # Pick a new position
        Fn_new = f(Xnew)  # Calculate the new forces at this position
        Rn_new = residual(Fn_new, Xnew)
        Fp_new, Rp_new = apply_precon(Fn_new, Xnew)

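        # Local error estimate: the 1st-order (forward Euler) update is h * Fp,
        # while the 2nd-order (trapezoidal) update would be
        # 0.5 * h * (Fp + Fp_new); their difference gives e below.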

        e = 0.5 * h * (Fp_new - Fp)  # Estimate the area under the forces curve
        err = np.linalg.norm(e, np.inf)  # Error estimate

        # Accept step if residual decreases sufficiently and/or error acceptable
        accept = ((Rp_new <= Rp * (1 - C1 * h)) or
                  ((Rp_new <= Rp * C2) and err <= rtol))

        # Pick an extrapolation scheme for the system & find new increment
        y = Fp - Fp_new
        if extrapolate == 1:  # F(xn + h Fp)
            h_ls = h * (Fp @ y) / (y @ y)
        elif extrapolate == 2:  # F(Xn + h Fp)
            h_ls = h * (Fp @ Fp_new) / (Fp @ y + 1e-10)
        elif extrapolate == 3:  # min | F(Xn + h Fp) |
            h_ls = h * (Fp @ y) / (y @ y + 1e-10)
        else:
            raise ValueError(f'invalid extrapolate value: {extrapolate}. '
                             'Must be 1, 2 or 3')
        if np.isnan(h_ls) or h_ls < hmin:  # Reject if increment is too small
            h_ls = np.inf

        h_err = h * 0.5 * np.sqrt(rtol / err)

        # Accept the step and do the update
        if accept:
            X = Xnew
            Rn = Rn_new
            Fn = Fn_new
            Fp = Fp_new
            Rp = Rp_new
            callback(X)

            # We check the residuals again
            if converged(Fn, X):
                log(f"ODE12r: terminates successfully "
                    f"after {nit} iterations.")
                return X
            if Rn >= maxtol:
                log(f"ODE12r: Residual {Rn} is too "
                    f"large at iteration number {nit}")
                return X

            # Compute a new step size.
            # Based on the extrapolation and some other heuristics
            h = max(0.25 * h,
                    min(4 * h, h_err, h_ls))  # limit the change to [h/4, 4h]

            debug(f"ODE12r: accept: new h = {h}, |F| = {Rp}")
            debug(f"ODE12r: hls = {h_ls}")
            debug(f"ODE12r: herr = {h_err}")
        else:
            # Compute a new step size.
            h = max(0.1 * h, min(0.25 * h, h_err,
                                 h_ls))
            debug(f"ODE12r: reject: new h = {h}")
            debug(f"ODE12r: |Fnew| = {Rp_new}")
            debug(f"ODE12r: |Fold| = {Rp}")
            debug(f"ODE12r: |Fnew|/|Fold| = {Rp_new / Rp}")

        # Abort if the step size is too small
        if abs(h) <= hmin:
            raise OptimizerConvergenceError('ODE12r terminates unsuccessfully:'
                                            f' step size {h} too small')

    raise OptimizerConvergenceError(f'ODE12r terminates unsuccessfully after '
                                    f'{steps} iterations.')


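# Illustrative sketch (not part of the original module): ode12r only needs a
# force function and a starting vector, so it can be exercised on a toy
# quadratic energy 0.5 * x @ A @ x, whose driving force is -A @ x. The helper
# below is hypothetical and exists purely as a usage example.
def _ode12r_quadratic_demo():
    A = np.diag([1.0, 2.0])    # simple, well-conditioned quadratic
    X0 = np.array([1.0, 1.0])  # starting degrees of freedom
    # Follow the driving force until the residual drops below fmax
    return ode12r(lambda x: -A @ x, X0, fmax=1e-6, verbose=0)
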

class ODE12r(SciPyOptimizer):
    """
    Optimizer based on the adaptive ODE solver :func:`ode12r`
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, alpha=1.0, master=None,
                 force_consistent=None, precon=None, verbose=0, rtol=1e-2):
        SciPyOptimizer.__init__(self, atoms, logfile, trajectory,
                                callback_always, alpha, master,
                                force_consistent)
        from ase.optimize.precon.precon import make_precon  # avoid circular dep
        self.precon = make_precon(precon)
        self.verbose = verbose
        self.rtol = rtol

    def apply_precon(self, Fn, X):
        self.atoms.set_positions(X.reshape(len(self.atoms), 3))
        Fn, Rn = self.precon.apply(Fn, self.atoms)
        return Fn, Rn

    def call_fmin(self, fmax, steps):
        ode12r(lambda x: -self.fprime(x),
               self.x0(),
               fmax=fmax, steps=steps,
               verbose=self.verbose,
               apply_precon=self.apply_precon,
               callback=self.callback,
               converged=lambda F, X: self.converged(F.reshape(-1, 3)),
               rtol=self.rtol)
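

# Usage sketch (an assumption, not taken from the original module): ODE12r
# follows the standard ASE optimizer interface, so a typical relaxation looks
# like the block below. The EMT calculator and the 'Exp' preconditioner name
# are chosen purely for illustration.
if __name__ == '__main__':
    from ase.build import bulk
    from ase.calculators.emt import EMT

    atoms = bulk('Cu', cubic=True) * (2, 2, 2)  # small test system
    atoms.rattle(0.05)                          # perturb away from equilibrium
    atoms.calc = EMT()

    opt = ODE12r(atoms, precon='Exp', rtol=1e-2, verbose=0)
    opt.run(fmax=1e-3, steps=100)               # relax until |F| < 1e-3 eV/Å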