Commit 0b46e7dc authored by Viktor Reshniak

add minimize to loss class

parent c5112f21
+54 −43
@@ -3,7 +3,7 @@ import time
from pathlib import Path

import numpy as np
-from scipy.optimize import minimize
+from scipy.optimize import minimize as scipy_minimize
from scipy.linalg import svd
from numpy.linalg import eig
from scipy.special import loggamma, erf
@@ -1334,6 +1334,32 @@ class MLELoss(Loss):
	def d2loss_dfit2(self):
		return self.data / self._fit**2

+	def minimize(self, solver, tol, maxfun, params_init, params_lbnd, params_ubnd, disp=_debug):
+		# from scipy.optimize import BFGS
+		# class myBFGS(BFGS):
+		# 	def initialize(self, n, approx_type):
+		# 		super().initialize(n, approx_type)
+		# 		if self.approx_type == 'hess':
+		# 			self.B = loss_fun.hessian(params_init)
+		# 		else:
+		# 			self.H = np.linalg.inv(loss_fun.hessian(params_init))
+		return scipy_minimize(self,
+			jac  = self.gradient,
+			hess = self.hessian,
+			# hess = myBFGS(),
+			x0=params_init, bounds=tuple(zip(params_lbnd,params_ubnd)),
+			method=solver, options={'maxiter':1000, 'maxfun':maxfun, 'maxls':100, 'maxcor':100, 'ftol':1.e-10, 'gtol':tol, 'xtol':1.e-10, 'disp':disp}
+			# method='L-BFGS-B', options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-6, 'gtol':1.e-6, 'disp':True}
+			# method='BFGS', options={'maxiter':1000, 'gtol':1.e-6, 'norm':np.inf, 'disp':True}
+			# method='CG', options={'maxiter':1000, 'gtol':1.e-5, 'norm':np.inf, 'disp':True}
+			# method='Newton-CG', options={'maxiter':1000, 'xtol':1.e-6, 'disp':True}
+			# method='Nelder-Mead', options={'maxiter':1000, 'disp':False}
+			# method='TNC', options={'scale':None, 'maxfun':1000, 'ftol':1.e-3, 'gtol':1.e-5, 'disp':True}
+			# method='dogleg', options={'maxiter':1000, 'tol':1.e-6, 'gtol':1.e-8, 'disp':True}
+			# method='trust-krylov', options={'maxiter':1000, 'tol':1.e-6, 'inexact':False, 'disp':True}
+			# method='trust-exact', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
+			# method='trust-constr', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
+			)

###############################################################################
###############################################################################
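A minimal usage sketch of the new wrapper added above (not part of the commit): the MLELoss constructor arguments mirror the PeakHistogram call in the next hunk; 'L-BFGS-B' is one plausible solver value, consistent with the maxfun/maxls/maxcor options in the dict; my_bkgr_fun, my_peak_fun, fit_points, fit_data, and the params_* arrays are placeholder inputs supplied by the caller.

# Hedged usage sketch: build an MLELoss as PeakHistogram.fit does,
# then call the new minimize wrapper on it. Placeholder inputs
# (my_bkgr_fun, my_peak_fun, fit_points, fit_data, params_*) are not
# names introduced by this commit.
loss_fun = MLELoss(bkgr_fun=my_bkgr_fun, peak_fun=my_peak_fun,
                   points=fit_points, data=fit_data)
result = loss_fun.minimize('L-BFGS-B', tol=1.e-6, maxfun=1000,
                           params_init=params_init,
                           params_lbnd=params_lbnd,
                           params_ubnd=params_ubnd,
                           disp=False)
print(result.x)  # result is a scipy OptimizeResult; .x holds the fitted parameters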
@@ -1919,15 +1945,12 @@ class PeakHistogram(object):
				x0=params_init, bounds=[params_lbnd,params_ubnd], method='trf', verbose=0, max_nfev=1000)
		elif loss=='mle':
			loss_fun = MLELoss(bkgr_fun=self.bkgr_fun, peak_fun=self.peak_fun, points=fit_points, data=fit_data)
-
-			# from scipy.optimize import BFGS
-			# class myBFGS(BFGS):
-			# 	def initialize(self, n, approx_type):
-			# 		super().initialize(n, approx_type)
-			# 		if self.approx_type == 'hess':
-			# 			self.B = loss_fun.hessian(params_init)
-			# 		else:
-			# 			self.H = np.linalg.inv(loss_fun.hessian(params_init))
+			result = loss_fun.minimize(solver, tol,
+				maxfun=100,
+				params_init=params_init,
+				params_lbnd=params_lbnd,
+				params_ubnd=params_ubnd,
+				disp=True)

			# result = fmin_bfgs_2(loss_fun, params_init, fprime=loss_fun.gradient, gtol=1e-6, maxiter=1000, disp=2, init_hess=None)#(loss_fun.hessian(params_init)+loss_fun.hessian(params_init).T)/2)
			# result = fmin_bfgs_2(loss_fun, result.x, fprime=loss_fun.gradient, gtol=1e-6, maxiter=1000, disp=2, init_hess=loss_fun.hessian(result.x))
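The commented-out myBFGS block (removed here and carried into Loss.minimize above) sketches warm-starting scipy's quasi-Newton approximation with an exact initial Hessian instead of the default identity. A self-contained sketch of that idea follows; the Rosenbrock test functions stand in for this file's loss/gradient/Hessian, and 'trust-constr' is an assumption here (the comments leave the method unspecified) because it accepts a HessianUpdateStrategy for hess.

import numpy as np
from scipy.optimize import BFGS, rosen, rosen_der, rosen_hess
from scipy.optimize import minimize as scipy_minimize

def warm_started_bfgs(hess0):
	# Subclass scipy's BFGS so the approximation is seeded with hess0;
	# init_scale=1.0 keeps scipy from rescaling the seeded matrix on
	# the first update.
	class WarmStartBFGS(BFGS):
		def initialize(self, n, approx_type):
			super().initialize(n, approx_type)
			if self.approx_type == 'hess':
				self.B = hess0.copy()
			else:
				self.H = np.linalg.inv(hess0)
	return WarmStartBFGS(init_scale=1.0)

x0 = np.array([1.3, 0.7])
result = scipy_minimize(rosen, x0, jac=rosen_der,
	hess=warm_started_bfgs(rosen_hess(x0)),
	method='trust-constr', options={'maxiter':1000, 'gtol':1.e-8})
print(result.x)  # converges to [1., 1.]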
@@ -1938,39 +1961,27 @@ class PeakHistogram(object):
			# 	method='Newton-CG', options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-8, 'gtol':1.e-6, 'xtol':1.e-6, 'disp':True} #_debug}
			# 	)

-			result = minimize(loss_fun,
-				jac  = loss_fun.gradient,
-				hess = loss_fun.hessian,
-				# hess = myBFGS(), #loss_fun.hessian,
-				x0=params_init, bounds=tuple(zip(params_lbnd,params_ubnd)),
-				method=solver, options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-8, 'gtol':1.e-6, 'xtol':1.e-6, 'disp':True} #_debug}
-				# method='L-BFGS-B', options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-6, 'gtol':1.e-6, 'disp':True}
-				# method='BFGS', options={'maxiter':1000, 'gtol':1.e-6, 'norm':np.inf, 'disp':True}
-				# method='CG', options={'maxiter':1000, 'gtol':1.e-5, 'norm':np.inf, 'disp':True}
-				# method='Newton-CG', options={'maxiter':1000, 'xtol':1.e-6, 'disp':True}
-				# method='Nelder-Mead', options={'maxiter':1000, 'disp':False}
-				# method='TNC', options={'scale':None, 'maxfun':1000, 'ftol':1.e-3, 'gtol':1.e-5, 'disp':True}
-				# method='dogleg', options={'maxiter':1000, 'tol':1.e-6, 'gtol':1.e-8, 'disp':True}
-				# method='trust-krylov', options={'maxiter':1000, 'tol':1.e-6, 'inexact':False, 'disp':True}
-				# method='trust-exact', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
-				# method='trust-constr', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
-				)
-			# # restrict parameters
-			# if solver!='L-BFGS-B':
-			# 	for i in range(len(result.x)):
-			# 		result.x[i] = max(min(result.x[i],params_ubnd[i]),params_lbnd[i])
-
-			# print(peak_loss.func_calls, peak_loss.grad_calls, peak_loss.hess_calls)
-			# g,dg = gaussian_mixture(result.x[nbkgr:],fit_points,npeaks=1,covariance_parameterization=covariance_parameterization,return_gradient=True)
-			# fit = result.x[0]**2 + g.reshape(fit_data.shape)
-			# res = (fit-fit_data) / fit
-			# grad = res.reshape((1,*fit_data.shape)) * dg.reshape((-1,*fit_data.shape))
-			# grad = grad.reshape((grad.shape[0],-1)).sum(axis=1)
-			# grad = np.array( [2*result.x[0]*res.sum(),0,0,0] + list(grad) + [0]*3)
-			# print(result.jac)
-			# print(grad)
-
-			self.init_params = np.array(params_init)
+			# result = minimize(loss_fun,
+			# 	jac  = loss_fun.gradient,
+			# 	hess = loss_fun.hessian,
+			# 	# hess = myBFGS(), #loss_fun.hessian,
+			# 	x0=params_init, bounds=tuple(zip(params_lbnd,params_ubnd)),
+			# 	method=solver, options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-8, 'gtol':1.e-6, 'xtol':1.e-6, 'disp':True} #_debug}
+			# 	# method='L-BFGS-B', options={'maxiter':1000, 'maxfun':1000, 'maxls':20, 'maxcor':100, 'ftol':1.e-6, 'gtol':1.e-6, 'disp':True}
+			# 	# method='BFGS', options={'maxiter':1000, 'gtol':1.e-6, 'norm':np.inf, 'disp':True}
+			# 	# method='CG', options={'maxiter':1000, 'gtol':1.e-5, 'norm':np.inf, 'disp':True}
+			# 	# method='Newton-CG', options={'maxiter':1000, 'xtol':1.e-6, 'disp':True}
+			# 	# method='Nelder-Mead', options={'maxiter':1000, 'disp':False}
+			# 	# method='TNC', options={'scale':None, 'maxfun':1000, 'ftol':1.e-3, 'gtol':1.e-5, 'disp':True}
+			# 	# method='dogleg', options={'maxiter':1000, 'tol':1.e-6, 'gtol':1.e-8, 'disp':True}
+			# 	# method='trust-krylov', options={'maxiter':1000, 'tol':1.e-6, 'inexact':False, 'disp':True}
+			# 	# method='trust-exact', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
+			# 	# method='trust-constr', options={'maxiter':1000, 'gtol':1.e-4, 'disp':True}
+			# 	)
+
+			self.init_loss.append(loss_fun(params_init))
+			self.init_params.append(np.array(params_init))
+
			self.fit_params  = result.x

			if _debug: