Mujpy / AddMultirun

Multirun global fit, construction of global chisquare

from numpy import zeros, sum  # numpy sum reduces over all array elements, unlike the builtin

def _load_data_multirun_user_(self, x, y, components, e=1):
    '''
    input:
    x, y, e are numpy arrays; y, e are 2d
    e = 1 or missing yields unitary errors
    components is a list [[method,[key,...,key]],...,[method,[key,...,key]]],
    produced by int2_multirun_user_method_key() from mujpy.aux.aux,
    where method is an instantiation of a component, e.g. self.ml,
    and par = key(p) produces each parameter value
    _add_multirun_ must produce a 2d array f of shape (nruns,nbins),
    therefore each method in _components_ must behave as a np.vectorize of nrun copies of the component
    method da is not allowed here, since there is no need for alpha
    no fft of residues
    '''
    self._x_ = x
    self._y_ = y  # self._global_ = True if _nglobals_ is not None else False
    self._components_ = components
    self._add_ = self._add_multirun_
    self._e_ = e
    return True, ''  # no error
def _add_multirun_(self, x, *argv):  # bound to self._add_ by _load_data_multirun_user_
    f = zeros((self._y_.shape[0], x.shape[0]))  # array(nruns,nbins)
    p = argv  # the tuple of minuit internal parameter values
    for method, keys in self._components_:  # list of [method, keys] defined in _load_data_multirun_user_
        # method calculates one component of the model, keys is the list of lists of its parameters for all runs
        pars = [[key(p) for key in run_keys] for run_keys in keys]  # list over runs of parameter lists for this component
        f += method(x, *pars)  # calculates the component for each run and vstacks them
    return f  # array(nruns,nbins)
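Continuing the hypothetical sketch above, the accumulation performed by _add_multirun_ can be checked by hand for shape:

x = np.linspace(0, 10, 5)  # 5 hypothetical time bins
p = (0.25, 0.3, 1.1)       # (A, lam_run0, lam_run1)
pars = [[key(p) for key in run_keys] for run_keys in components[0][1]]
f = ml(x, *pars)           # the single-component sum _add_multirun_ would return
assert f.shape == (2, x.shape[0])  # array(nruns,nbins)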
def _chisquare_(self, *argv):
    num = abs(self._add_(self._x_, *argv) - self._y_)
    normsquaredev = (num/self._e_)**2
    return sum(normsquaredev)  # numpy sum over all runs and bins
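The value returned is thus chi2 = sum over runs and bins of ((f-y)/e)**2. A quick numerical sanity check on the same hypothetical sketch:

y = f + 0.01                  # fake data, offset from the model by one sigma
e = np.full_like(y, 0.01)
chi2 = np.sum(((ml(x, *pars) - y)/e)**2)  # the double sum, runs and bins
assert np.isclose(chi2, y.size)           # each bin contributes exactly 1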
Multirun global fit, construction of gradient

def _load_data_multirun_grad_(self, x, y, components, gradients, ref, e=1):
    '''
    input:
    x, y, e are numpy arrays; y, e are 2d
    e = 1 or missing yields unitary errors
    gradients is a list [[grad,[key,...,key]],...,[grad,[key,...,key]]]
    components is a list [[method,[key,...,key]],...,[method,[key,...,key]]],
    produced by int2_multirun_grad_method_key() from mujpy.aux.aux,
    where method is an instantiation of a component, e.g. self.ml,
    grad(x,*pars)[kpar] is its derivative with respect to the kpar-th of pars,
    and par = key(p) produces each parameter value
    ref is a list, one per minuit parameter, of lists [component,parameter],
    flagging which component parameters depend on that minuit parameter
    _add_multirun_ must produce a 2d array f of shape (nruns,nbins),
    therefore each method in _components_ must behave as a np.vectorize of nrun copies of the component
    method da is not allowed here, since there is no need for alpha
    no fft of residues
    '''
    self._x_ = x
    self._y_ = y  # self._global_ = True if _nglobals_ is not None else False
    self._components_ = components
    self._gradients_ = gradients
    self.ref = ref  # used by _add_grad_ below
    self._add_ = self._add_multirun_
    self._e_ = e
    return True, ''  # no error
def _add_grad_(self, x, k, *argv):  # k in range(len(argv))
    # this is one component of the gradient, the derivative of the model with respect to p[k]
    g = zeros((self._y_.shape[0], x.shape[0]))  # array(nruns,nbins)
    p = argv  # the tuple of minuit internal parameter values
    for kcomp, kpar in self.ref[k]:  # self.ref, stored by _load_data_multirun_grad_, lists the [component,parameter] pairs that depend on p[k]
        grad, keys = self._gradients_[kcomp]  # grad yields the derivatives of the kcomp-th component, keys the list of lists of its parameters for all runs
        pars = [[key(p) for key in run_keys] for run_keys in keys]  # list over runs of parameter lists for the kcomp-th component
        g += grad(x, *pars)[kpar]  # the kpar-th derivative, calculated for each run and vstacked
    return g  # array(nruns,nbins), the k-th gradient function
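Under the same assumptions, the stacked analytic derivative can be spot-checked against a finite difference:

p2 = (0.25, 0.3)
pars2 = [[key(p2) for key in run_keys] for run_keys in shared]
eps = 1e-7
pars2A = [[key((p2[0] + eps, p2[1])) for key in run_keys] for run_keys in shared]
fd = (ml(x, *pars2A) - ml(x, *pars2))/eps      # numerical df/dA
assert np.allclose(fd, ml_grad(x, *pars2)[0], atol=1e-5)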
def _grad_(self, *argv):
    g = 2*(self._add_(self._x_, *argv) - self._y_)/self._e_**2  # d chi2/df = 2(f-y)/e**2
    gg = []
    for k in range(len(argv)):
        gg.append(sum(g*self._add_grad_(self._x_, k, *argv)))  # numpy sum over rows (runs) and columns (time bins)
    return gg
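How such a pair could be handed to minuit, assuming iminuit 2.x (the grad= and name= constructor keywords) and a hypothetical object fit whose data were loaded with _load_data_multirun_grad_; mujpy's actual wiring may differ:

from iminuit import Minuit

m = Minuit(fit._chisquare_, 0.25, 0.3,   # made-up starting values for (A, lam)
           grad=fit._grad_, name=("A", "lam"))
m.errordef = Minuit.LEAST_SQUARES        # chi-square: one sigma corresponds to +1
m.migrad()                               # minimize, using the analytic gradient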