diff --git a/scripts/Calibration/Examples/TubeCalibDemoMerlin.py b/scripts/Calibration/Examples/TubeCalibDemoMerlin.py index 10e0381418697434df6cc57d3627945f23bc5a0c..aa7eeabfd432d0662121e4e3d01ecad45ab71ac4 100644 --- a/scripts/Calibration/Examples/TubeCalibDemoMerlin.py +++ b/scripts/Calibration/Examples/TubeCalibDemoMerlin.py @@ -149,11 +149,11 @@ def calibrateMerlin(filename): CalibratedComponent = ['MERLIN/door3/tube_1_%d' % (i) for i in range(1, 9)] half_diff_center = ( - 2.92713867188 - 1.22879882813) / 2 # difference among the expected center position for both tubes - - # here a little bit of attempts is necessary. The efective center position and lengh is different for the calibrated tube, that - # is the reason, the calibrated values of the smaller tube does not seems aligned with the others. By, finding the 'best' half_diff_center - # value, the alignment occurs nicely. + 2.92713867188 - 1.22879882813) / 2 # difference among the expected center position for + # both tubes here a little bit of attempts is necessary. + # The effective center position and lengh is different for the calibrated tube, that is the reason, + # the calibrated values of the smaller tube does not seems aligned with the others. By, finding the + # 'best' half_diff_center value, the alignment occurs nicely. half_diff_center = 0.835 # # the knownpositions were given with the center of the bigger tube as origin, to convert diff --git a/scripts/DiamondAttenuationCorrection/FitTrans.py b/scripts/DiamondAttenuationCorrection/FitTrans.py index e18aeebdae721a3b435c6f046bc4b3b1968f57f2..a7c2495e90d3df47909645bbc60ffe6649bd016d 100644 --- a/scripts/DiamondAttenuationCorrection/FitTrans.py +++ b/scripts/DiamondAttenuationCorrection/FitTrans.py @@ -59,8 +59,8 @@ def calcDspacing(a, b, c, alp, bet, gam, h, k, l): sb = np.sin(np.radians(bet)) sg = np.sin(np.radians(gam)) - oneoverdsq = (1.0 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg)**(-1) * \ - ((h * sa / a)**2 + (k * sb / b)**2 + (l * sg / c)**2 + oneoverdsq = (1.0 - ca ** 2 - cb ** 2 - cg ** 2 + 2 * ca * cb * cg) ** (-1) * \ + ((h * sa / a) ** 2 + (k * sb / b) ** 2 + (l * sg / c) ** 2 + (2 * k * l / (b * c)) * (cb * cg - ca) + (2 * l * h / (c * a)) * (cg * ca - cb) + (2 * h * k / (a * b)) * (ca * cb - cg)) @@ -121,22 +121,22 @@ def forbidden(h, k, l): result = 0 # condition 1 - if ((h != 0)and (k != 0) and (l != 0)): # general hkl + if ((h != 0) and (k != 0) and (l != 0)): # general hkl term1 = h + k term2 = h + l # all have to be even term3 = k + l - if not((term1 % 2) == 0 and (term2 % 2) == 0 and (term3 % 2) == 0): + if not ((term1 % 2) == 0 and (term2 % 2) == 0 and (term3 % 2) == 0): result = 1 boolresult = bool(result) return boolresult else: result = 0 - #% condition 2 + # % condition 2 if ((h == 0) and (k != 0) and (l != 0)): # 0kl reflections term1 = k + l mod4 = mod(term1, 4) - if not(mod4 == 0 and mod(k, 2) == 0 and mod(l, 2) == 0): + if not (mod4 == 0 and mod(k, 2) == 0 and mod(l, 2) == 0): result = 1 boolresult = bool(result) return boolresult @@ -145,7 +145,7 @@ def forbidden(h, k, l): # condition 3 if (h == k): # hhl reflections - if not(mod(h + l, 2) == 0): + if not (mod(h + l, 2) == 0): result = 1 boolresult = bool(result) return boolresult @@ -153,9 +153,9 @@ def forbidden(h, k, l): result = 0 # condition 4 - if ((h == 0) and (k == 0) and (l != 0)): # 00l reflections not including 000 + if ((h == 0) and (k == 0) and (l != 0)): # 00l reflections not including 000 mod4 = mod(l, 4) - if not(mod4 == 0): + if not (mod4 == 0): result = 1 boolresult = 
bool(result) return boolresult @@ -191,7 +191,7 @@ def allowedDiamRefs(hmin, hmax, kmin, kmax, lmin, lmax): # create new array with all h!=0 k!=0 l!=0 hkl = np.zeros(shape=(0, 3)) for i in range(n): - if not(allhkl[i][0] == 0 and allhkl[i][1] == 0 and allhkl[i][2] == 0): + if not (allhkl[i][0] == 0 and allhkl[i][1] == 0 and allhkl[i][2] == 0): hkl = np.vstack((hkl, [allhkl[i][0], allhkl[i][1], allhkl[i][2]])) d.append(calcDspacing(3.56683, 3.56683, 3.56683, 90, 90, 90, hkl[k][0], hkl[k][1], hkl[k][2])) @@ -224,7 +224,7 @@ def getISAWub(fullfilename): ''' fileID = fullfilename if fileID == 1: - print('Error opening file: ' + fullfilename) + print(('Error opening file: ' + fullfilename)) f = open(fileID, "r") lines = f.readlines() f.close() @@ -236,8 +236,9 @@ def getISAWub(fullfilename): UB[i][0], UB[i][1], UB[i][2] = lines[i].split() UB = UB.transpose() for i in range(3, 5): - lattice[i - 3][0], lattice[i - 3][1], lattice[i - 3][2], lattice[i - 3][3], lattice[i - 3][4], lattice[i - 3][5], \ - non = lines[i].split() + lattice[i - 3][0], lattice[i - 3][1], lattice[i - 3][2], lattice[i - 3][3], lattice[i - 3][4], lattice[i - 3][ + 5], \ + non = lines[i].split() print('Successfully got UB and lattice') @@ -260,93 +261,93 @@ def pkintread(hkl, loc): returns pkint = np. array - 1D vector ''' - #A = np.genfromtxt('diamond_reflist.csv', delimiter=',', skip_header=True) + # A = np.genfromtxt('diamond_reflist.csv', delimiter=',', skip_header=True) # print A - A = np.array([[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 8.00000000e+00, - 2.06110000e+00, 5.54000000e+04], - [2.00000000e+00, 2.00000000e+00, 0.00000000e+00, 1.20000000e+01, - 1.26220000e+00, 7.52000000e+04], - [3.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 1.07640000e+00, 2.98000000e+04], - [2.00000000e+00, 2.00000000e+00, 2.00000000e+00, 8.00000000e+00, - 1.03060000e+00, 2.50000000e-25], - [4.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00, - 8.92500000e-01, 4.05000000e+04], - [3.00000000e+00, 3.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 8.19000000e-01, 1.61000000e+04], - [4.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, - 7.28700000e-01, 2.18000000e+04], - [5.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 6.87000000e-01, 8.64000000e+03], - [3.00000000e+00, 3.00000000e+00, 3.00000000e+00, 8.00000000e+00, - 6.87000000e-01, 8.64000000e+03], - [4.00000000e+00, 4.00000000e+00, 0.00000000e+00, 1.20000000e+01, - 6.31100000e-01, 1.17000000e+04], - [5.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, - 6.03400000e-01, 4.65000000e+03], - [4.00000000e+00, 4.00000000e+00, 2.00000000e+00, 2.40000000e+01, - 5.95000000e-01, 1.83000000e-12], - [6.00000000e+00, 2.00000000e+00, 0.00000000e+00, 2.40000000e+01, - 5.64500000e-01, 6.31000000e+03], - [5.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, - 5.44400000e-01, 2.50000000e+03], - [6.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, - 5.38200000e-01, 8.80000000e-26], - [4.00000000e+00, 4.00000000e+00, 4.00000000e+00, 8.00000000e+00, - 5.15300000e-01, 3.40000000e+03], - [5.00000000e+00, 5.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 4.99900000e-01, 1.35000000e+03], - [7.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 4.99900000e-01, 1.35000000e+03], - [6.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01, - 4.77100000e-01, 1.83000000e+03], - [7.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, - 4.64800000e-01, 7.25000000e+02], - [5.00000000e+00, 
5.00000000e+00, 3.00000000e+00, 2.40000000e+01, - 4.64800000e-01, 7.25000000e+02], - [8.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00, - 4.46200000e-01, 9.84000000e+02], - [7.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, - 4.36100000e-01, 3.90000000e+02], - [6.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01, - 4.32900000e-01, 1.53000000e-13], - [6.00000000e+00, 6.00000000e+00, 0.00000000e+00, 1.20000000e+01, - 4.20700000e-01, 5.30000000e+02], - [8.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, - 4.20700000e-01, 5.30000000e+02], - [5.00000000e+00, 5.00000000e+00, 5.00000000e+00, 8.00000000e+00, - 4.12200000e-01, 2.10000000e+02], - [7.00000000e+00, 5.00000000e+00, 1.00000000e+00, 4.80000000e+01, - 4.12200000e-01, 2.10000000e+02], - [6.00000000e+00, 6.00000000e+00, 2.00000000e+00, 2.40000000e+01, - 4.09500000e-01, 1.98000000e-26], - [8.00000000e+00, 4.00000000e+00, 0.00000000e+00, 2.40000000e+01, - 3.99100000e-01, 2.85000000e+02], - [7.00000000e+00, 5.00000000e+00, 3.00000000e+00, 4.80000000e+01, - 3.91900000e-01, 1.13000000e+02], - [9.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 3.91900000e-01, 1.13000000e+02], - [8.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01, - 3.89500000e-01, 4.44000000e-14], - [6.00000000e+00, 6.00000000e+00, 4.00000000e+00, 2.40000000e+01, - 3.80600000e-01, 1.53000000e+02], - [9.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, - 3.74200000e-01, 6.08000000e+01], - [8.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01, - 3.64400000e-01, 8.26000000e+01], - [9.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, - 3.58800000e-01, 3.27000000e+01], - [7.00000000e+00, 5.00000000e+00, 5.00000000e+00, 2.40000000e+01, - 3.58800000e-01, 3.27000000e+01], - [7.00000000e+00, 7.00000000e+00, 1.00000000e+00, 2.40000000e+01, - 3.58800000e-01, 3.27000000e+01]]) + A = np.array([[1.00000000e+00, 1.00000000e+00, 1.00000000e+00, 8.00000000e+00, + 2.06110000e+00, 5.54000000e+04], + [2.00000000e+00, 2.00000000e+00, 0.00000000e+00, 1.20000000e+01, + 1.26220000e+00, 7.52000000e+04], + [3.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 1.07640000e+00, 2.98000000e+04], + [2.00000000e+00, 2.00000000e+00, 2.00000000e+00, 8.00000000e+00, + 1.03060000e+00, 2.50000000e-25], + [4.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00, + 8.92500000e-01, 4.05000000e+04], + [3.00000000e+00, 3.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 8.19000000e-01, 1.61000000e+04], + [4.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, + 7.28700000e-01, 2.18000000e+04], + [5.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 6.87000000e-01, 8.64000000e+03], + [3.00000000e+00, 3.00000000e+00, 3.00000000e+00, 8.00000000e+00, + 6.87000000e-01, 8.64000000e+03], + [4.00000000e+00, 4.00000000e+00, 0.00000000e+00, 1.20000000e+01, + 6.31100000e-01, 1.17000000e+04], + [5.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, + 6.03400000e-01, 4.65000000e+03], + [4.00000000e+00, 4.00000000e+00, 2.00000000e+00, 2.40000000e+01, + 5.95000000e-01, 1.83000000e-12], + [6.00000000e+00, 2.00000000e+00, 0.00000000e+00, 2.40000000e+01, + 5.64500000e-01, 6.31000000e+03], + [5.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, + 5.44400000e-01, 2.50000000e+03], + [6.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, + 5.38200000e-01, 8.80000000e-26], + [4.00000000e+00, 4.00000000e+00, 4.00000000e+00, 8.00000000e+00, + 5.15300000e-01, 
3.40000000e+03], + [5.00000000e+00, 5.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 4.99900000e-01, 1.35000000e+03], + [7.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 4.99900000e-01, 1.35000000e+03], + [6.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01, + 4.77100000e-01, 1.83000000e+03], + [7.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, + 4.64800000e-01, 7.25000000e+02], + [5.00000000e+00, 5.00000000e+00, 3.00000000e+00, 2.40000000e+01, + 4.64800000e-01, 7.25000000e+02], + [8.00000000e+00, 0.00000000e+00, 0.00000000e+00, 6.00000000e+00, + 4.46200000e-01, 9.84000000e+02], + [7.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, + 4.36100000e-01, 3.90000000e+02], + [6.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01, + 4.32900000e-01, 1.53000000e-13], + [6.00000000e+00, 6.00000000e+00, 0.00000000e+00, 1.20000000e+01, + 4.20700000e-01, 5.30000000e+02], + [8.00000000e+00, 2.00000000e+00, 2.00000000e+00, 2.40000000e+01, + 4.20700000e-01, 5.30000000e+02], + [5.00000000e+00, 5.00000000e+00, 5.00000000e+00, 8.00000000e+00, + 4.12200000e-01, 2.10000000e+02], + [7.00000000e+00, 5.00000000e+00, 1.00000000e+00, 4.80000000e+01, + 4.12200000e-01, 2.10000000e+02], + [6.00000000e+00, 6.00000000e+00, 2.00000000e+00, 2.40000000e+01, + 4.09500000e-01, 1.98000000e-26], + [8.00000000e+00, 4.00000000e+00, 0.00000000e+00, 2.40000000e+01, + 3.99100000e-01, 2.85000000e+02], + [7.00000000e+00, 5.00000000e+00, 3.00000000e+00, 4.80000000e+01, + 3.91900000e-01, 1.13000000e+02], + [9.00000000e+00, 1.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 3.91900000e-01, 1.13000000e+02], + [8.00000000e+00, 4.00000000e+00, 2.00000000e+00, 4.80000000e+01, + 3.89500000e-01, 4.44000000e-14], + [6.00000000e+00, 6.00000000e+00, 4.00000000e+00, 2.40000000e+01, + 3.80600000e-01, 1.53000000e+02], + [9.00000000e+00, 3.00000000e+00, 1.00000000e+00, 4.80000000e+01, + 3.74200000e-01, 6.08000000e+01], + [8.00000000e+00, 4.00000000e+00, 4.00000000e+00, 2.40000000e+01, + 3.64400000e-01, 8.26000000e+01], + [9.00000000e+00, 3.00000000e+00, 3.00000000e+00, 2.40000000e+01, + 3.58800000e-01, 3.27000000e+01], + [7.00000000e+00, 5.00000000e+00, 5.00000000e+00, 2.40000000e+01, + 3.58800000e-01, 3.27000000e+01], + [7.00000000e+00, 7.00000000e+00, 1.00000000e+00, 2.40000000e+01, + 3.58800000e-01, 3.27000000e+01]]) diamd = A[:, 4] - #diamMult = A[:, 3] # unused variable + # diamMult = A[:, 3] # unused variable diamFCalcSq = A[:, 5] nref = hkl.shape[0] - #% disp(['there are: ' num2str(nref) ' reflections']); - #% whos loc + # % disp(['there are: ' num2str(nref) ' reflections']); + # % whos loc ''' % [i,j] = size(x); @@ -363,10 +364,10 @@ def pkintread(hkl, loc): for i in range(nref): if loc[i][0] > 0: - #% satisfies Bragg condition (otherwise ignore) + # % satisfies Bragg condition (otherwise ignore) Fsq = Fsqcalc(loc[i][1], diamd, diamFCalcSq) - #% Fsq = 1; - L = (np.sin(np.radians(loc[i][2] / 2.0)))**2 # Lorentz correction + # % Fsq = 1; + L = (np.sin(np.radians(loc[i][2] / 2.0))) ** 2 # Lorentz correction R = 1.0 # %dipLam(i)^4; %reflectivity correction A = 1.0 # %Absorption correction Ecor = 1 @@ -393,7 +394,7 @@ def Fsqcalc(d, diamd, diamFCalcSq): % global sf111 sf220 sf311 sf400 sf331 ''' - #n = len(diamd) # unused variable + # n = len(diamd) # unused variable ref = d dif = abs(diamd - ref) i = dif.argmin(0) # i is index of diamd closest to d @@ -496,7 +497,7 @@ def getMANTIDdat_keepbinning(csvfile): y = [] e = [] if fid < 0: - print('Error opening file: ' + csvfile) + 
print(('Error opening file: ' + csvfile)) for i in range(1, len(lines)): a, b, c = lines[i].split(",") x.append(float(a)) @@ -534,7 +535,7 @@ def findeqvs(hkl): nperm = len(permcomphkl) for k in range(nperm): if refhkl[0] == permcomphkl[k][0] and refhkl[1] == permcomphkl[k][1] and \ - refhkl[2] == permcomphkl[k][2]: + refhkl[2] == permcomphkl[k][2]: eqvlab[j] = lab lab += 1 @@ -558,8 +559,8 @@ def showx3(x): global neqv1, eqvlab1, neqv2, eqvlab2 global difa, function_verbose - #nref1 = hkl1.shape[0] # % number of reflections to integrate over # unused variable - #nref2 = hkl2.shape[0] # % number of reflections to integrate over # unused variable + # nref1 = hkl1.shape[0] # % number of reflections to integrate over # unused variable + # nref2 = hkl2.shape[0] # % number of reflections to integrate over # unused variable # % returns array with same dim as input labelling equivs eqvlab1, neqv1 = findeqvs(hkl1) eqvlab2, neqv2 = findeqvs(hkl2) @@ -569,7 +570,7 @@ def showx3(x): pkmult2 = x[6 + neqv1:7 + neqv1 + neqv2 - 1] sf = x[neqv1 + neqv2 + 7 - 1] pkwid1 = x[neqv1 + neqv2 + 8 - 1] - #bgd = x[neqv1 + neqv2 + 8 - 1:neqv1 + neqv2 + 9 + 2 - 1] # unused variable + # bgd = x[neqv1 + neqv2 + 8 - 1:neqv1 + neqv2 + 9 + 2 - 1] # unused variable pkwid2 = x[neqv1 + neqv2 + 10] # % if diamond intensities the same, allow single scale f relsf = x[neqv1 + neqv2 + 11] @@ -577,18 +578,18 @@ def showx3(x): L2 = x[neqv1 + neqv2 + 13] print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n') - print('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format( - 1, setang1[0], setang1[1], setang1[2])) - print('pkmult1: {0}\n'.format(pkmult1)) - print('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format( - 2, setang2[0], setang2[1], setang2[2])) - print('pkmult2: {0}\n'.format(pkmult2)) - print('Scale factor: {0}\n'.format(sf)) - print('pkwid1: {0}\n'.format(pkwid1)) - print('pkwid2: {0}\n'.format(pkwid2)) - print('Rel. scale factor : {0}\n'.format(relsf)) - print('Lambda multiplier: {0}\n'.format(delam)) - print('L2 sample to detector: {0} m\n'.format(L2)) + print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format( + 1, setang1[0], setang1[1], setang1[2]))) + print(('pkmult1: {0}\n'.format(pkmult1))) + print(('Setting angles diam {0} : \nalp {1} bet {2} gam {3} \n'.format( + 2, setang2[0], setang2[1], setang2[2]))) + print(('pkmult2: {0}\n'.format(pkmult2))) + print(('Scale factor: {0}\n'.format(sf))) + print(('pkwid1: {0}\n'.format(pkwid1))) + print(('pkwid2: {0}\n'.format(pkwid2))) + print(('Rel. 
scale factor : {0}\n'.format(relsf))) + print(('Lambda multiplier: {0}\n'.format(delam))) + print(('L2 sample to detector: {0} m\n'.format(L2))) print('_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/\n') @@ -646,7 +647,7 @@ def SimTransOutput3(name, x): delam = x[neqv1 + neqv2 + 12] L2 = x[neqv1 + neqv2 + 13] - shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF**2) + shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2) # number of lambda points to calculate over npt = shftlam.shape[0] # calculate information for peaks for crystal 1 using hkl,UB1, setang, @@ -672,9 +673,9 @@ def SimTransOutput3(name, x): # calculate background profile by multiplying this with coefficients # themselves bgdprof = nonzerobgd.dot(X) - #bgdprof = np.outer(nonzerobgd, X) + # bgdprof = np.outer(nonzerobgd, X) # print bgdprof - #bgdprof = bgdprof[0, :] + # bgdprof = bgdprof[0, :] # calculate peaks for crystal 1 t1 = np.zeros(npt) # initialise array containing profile @@ -682,10 +683,10 @@ def SimTransOutput3(name, x): if pktype == 1: pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift sig = pkwid1 * pkpars1[i][0] + pkwid2 * \ - (pkpars1[i][0]**2.) # const del(lambda)/lambda - extScl = pkpars1[i][0]**0 # lambda dependent extinction effect + (pkpars1[i][0] ** 2.) # const del(lambda)/lambda + extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * ( - np.exp(-((shftlam - pkpars1[i][0])**2.) / (2 * (sig**2)))) + np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2)))) # calculate peaks for crystal 2 t2 = np.zeros(npt) # initialise array containing profile @@ -693,26 +694,26 @@ def SimTransOutput3(name, x): if pktype == 1: pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift sig = pkwid1 * pkpars2[i][0] + pkwid2 * \ - (pkpars2[i][0]**2.) # const del(lambda)/lambda - extScl = pkpars2[i][0]**0 # lambda dependent extinction effect + (pkpars2[i][0] ** 2.) # const del(lambda)/lambda + extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * ( - np.exp(-(shftlam - pkpars2[i][0])**2. / (2 * (sig**2)))) + np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2)))) # calculate final profile ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2) - #t1 = 1.0; + # t1 = 1.0; # t2 = 1.0; # introduce weighting function and calc chi2... w = np.ones(len(shftlam)) # equal weighting everywhere - #i1 = np.where(shftlam > 2.15)[0][0] - #j1 = np.where(shftlam > 2.65)[0][0] + # i1 = np.where(shftlam > 2.15)[0][0] + # j1 = np.where(shftlam > 2.65)[0][0] # w[i1:j1] = 5 #extra weighting in region of first peaks # i1 = find(lam>1.68,1,'first'); # j1 = find(lam>2.05,1,'first'); # w(i1:j1)=5; %extra weighting but not too much resid = (y - ttot) * w - chi2 = np.sum(resid**2. / (2 * e**2)) / npt + chi2 = np.sum(resid ** 2. 
/ (2 * e ** 2)) / npt output = 1 if output == 1: @@ -810,7 +811,7 @@ def SimTrans3(x): delam = x[neqv1 + neqv2 + 12] L2 = x[neqv1 + neqv2 + 13] - shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF**2) + shftlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2) # number of lambda points to calculate over npt = shftlam.shape[0] # calculate information for peaks for crystal 1 using hkl,UB1, setang, @@ -836,9 +837,9 @@ def SimTrans3(x): # calculate background profile by multiplying this with coefficients # themselves bgdprof = nonzerobgd.dot(X) - #bgdprof = np.outer(nonzerobgd, X) + # bgdprof = np.outer(nonzerobgd, X) # print bgdprof - #bgdprof = bgdprof[0, :] + # bgdprof = bgdprof[0, :] # calculate peaks for crystal 1 t1 = np.zeros(npt) # initialise array containing profile @@ -846,10 +847,10 @@ def SimTrans3(x): if pktype == 1: pkpars1[i][0] = pkpars1[i][0] * delam # linear lambda shift sig = pkwid1 * pkpars1[i][0] + pkwid2 * \ - (pkpars1[i][0]**2.) # const del(lambda)/lambda - extScl = pkpars1[i][0]**0 # lambda dependent extinction effect + (pkpars1[i][0] ** 2.) # const del(lambda)/lambda + extScl = pkpars1[i][0] ** 0 # lambda dependent extinction effect t1 = t1 - extScl * pkmult1[int(eqvlab1[i])] * pkcalcint1[i] * ( - np.exp(-((shftlam - pkpars1[i][0])**2.) / (2 * (sig**2)))) + np.exp(-((shftlam - pkpars1[i][0]) ** 2.) / (2 * (sig ** 2)))) # calculate peaks for crystal 2 t2 = np.zeros(npt) # initialise array containing profile @@ -857,30 +858,30 @@ def SimTrans3(x): if pktype == 1: pkpars2[i][0] = pkpars2[i][0] * delam # linear lambda shift sig = pkwid1 * pkpars2[i][0] + pkwid2 * \ - (pkpars2[i][0]**2.) # const del(lambda)/lambda - extScl = pkpars2[i][0]**0 # lambda dependent extinction effect + (pkpars2[i][0] ** 2.) # const del(lambda)/lambda + extScl = pkpars2[i][0] ** 0 # lambda dependent extinction effect t2 = t2 - extScl * pkmult2[int(eqvlab2[i])] * pkcalcint2[i] * ( - np.exp(-(shftlam - pkpars2[i][0])**2. / (2 * (sig**2)))) + np.exp(-(shftlam - pkpars2[i][0]) ** 2. / (2 * (sig ** 2)))) # calculate final profile ttot = (bgdprof + sf * t1) * (bgdprof + sf * t2) - #t1 = 1.0; + # t1 = 1.0; # t2 = 1.0; # introduce weighting function and calc chi2... w = np.ones(len(shftlam)) # equal weighting everywhere - #i1 = np.where(shftlam > 2.15)[0][0] - #j1 = np.where(shftlam > 2.65)[0][0] + # i1 = np.where(shftlam > 2.15)[0][0] + # j1 = np.where(shftlam > 2.65)[0][0] # w[i1:j1] = 5 #extra weighting in region of first peaks # i1 = find(lam>1.68,1,'first'); # j1 = find(lam>2.05,1,'first'); # w(i1:j1)=5; %extra weighting but not too much resid = (y - ttot) * w - chi2 = np.sum(resid**2. / (2 * e**2)) / npt + chi2 = np.sum(resid ** 2. / (2 * e ** 2)) / npt # Print if the user wants verbose minimization if function_verbose == 'y': - print('Chi^2 ... ' + str(chi2)) + print(('Chi^2 ... 
' + str(chi2))) return chi2 @@ -921,10 +922,10 @@ def FitTrans(): print('*diamonds allowed to have different dip intensities!*') if cnstang == 1: - print( - '*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(1, anglim1)) - print( - '*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(2, anglim2)) + print(( + '*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(1, anglim1))) + print(( + '*Diam {0} setting angles constrained to range of +/- {1} about their current values*'.format(2, anglim2))) else: print('no constraint on setting angles') @@ -932,13 +933,13 @@ def FitTrans(): print('*intensity multipliers fixed*') # Get Input Files... - peaks_file = str(raw_input('Name of file containing diamond peaks: ')) + peaks_file = str(input('Name of file containing diamond peaks: ')) - run_number = str(raw_input('Input run number for transmission data: ')) + run_number = str(input('Input run number for transmission data: ')) # Build input filenames - #fullfilename_ub1 = str(run_number) + 'UB1.dat' # unused variable - #fullfilename_ub2 = str(run_number) + 'UB2.dat' # unused variable + # fullfilename_ub1 = str(run_number) + 'UB1.dat' # unused variable + # fullfilename_ub2 = str(run_number) + 'UB2.dat' # unused variable fullfilename_trans = 'transNorm' + str(run_number) + '.dat' # get both UB's @@ -948,13 +949,13 @@ def FitTrans(): # uigetfile('*.dat','Choose UB matrix for upstream diamond:'); # fullfilename = [pathname filename]; # fullfilename_ub1 = 'snap13108UB1.dat' - #UB1, remainder = getISAWub(fullfilename_ub1) + # UB1, remainder = getISAWub(fullfilename_ub1) # [filename pathname ~] = ... # uigetfile('*.dat','Choose UB matrix for downstream diamond:'); # fullfilename = [pathname filename]; # fullfilename_ub2 = 'snap13108UB2.dat' - #UB2, remainder = getISAWub(fullfilename_ub2) + # UB2, remainder = getISAWub(fullfilename_ub2) # get transmission data... # [filename,pathname,~] = ... 
@@ -963,7 +964,7 @@ def FitTrans(): fullfilename_trans = 'transNorm13148.csv' TOF, yin, ein = getMANTIDdat_keepbinning(fullfilename_trans) - print('Starting refinement for: ' + fullfilename_trans) + print(('Starting refinement for: ' + fullfilename_trans)) # set-up simulation @@ -982,8 +983,8 @@ def FitTrans(): # rebin transmission data lam = 0.0039558 * TOF / (L1 + initL2) - print('wavelength limits: ' + - str(lam[0]) + ' and ' + str(lam[len(lam) - 1])) + print(('wavelength limits: ' + + str(lam[0]) + ' and ' + str(lam[len(lam) - 1]))) minlam = 0.8 maxlam = 3.5 imin = np.where(lam >= minlam)[0][0] @@ -1009,7 +1010,7 @@ def FitTrans(): # initial conditions for crystal 2 setang2 = np.zeros(3) - #setang2[1:3][0] = 0.0 + # setang2[1:3][0] = 0.0 a, b, c = pkposcalc(allhkl, UB2, setang2) pkpars2 = np.column_stack((a, b, c)) @@ -1034,8 +1035,8 @@ def FitTrans(): hkl2 = np.vstack([hkl2, allhkl[i]]) k2 += 1 - print('There are: ' + str(k1) + ' expected dips due to Crystal 1') - print('There are: ' + str(k2) + ' expected dips due to Crystal 2') + print(('There are: ' + str(k1) + ' expected dips due to Crystal 1')) + print(('There are: ' + str(k2) + ' expected dips due to Crystal 2')) # determine equivalents # returns array with same dim as input labelling equivs @@ -1058,7 +1059,7 @@ def FitTrans(): pkcalcint2 *= 1e-6 pkmult2 = np.ones(neqv2) # peak intensity multiplier - relsf = 1.0 # default value + relsf = 1.0 # default value delam = 1.0 L2 = initL2 tbgd = bgd @@ -1066,29 +1067,29 @@ def FitTrans(): # Either generate, or read variable array from file # This is one big array with all the parameters to be refined in it. - prevf = str(raw_input('Look for pars from a previous run ([y]/n)? ')) + prevf = str(input('Look for pars from a previous run ([y]/n)? ')) if prevf == 'n': x0 = np.hstack((setang1, pkmult1, setang2, pkmult2, sf, pkwid, tbgd, pkwid2, relsf, delam, L2)) else: - # choose which file to use - parfilename = str(raw_input('Choose file with starting pars: ')) + # choose which file to use + parfilename = str(input('Choose file with starting pars: ')) parfullfilename = parfilename x0 = dlmread(parfullfilename) - tog = str(raw_input('Got parameters from: \n' + - parfilename + '\nUse these ([y]/n)?')) + tog = str(input('Got parameters from: \n' + + parfilename + '\nUse these ([y]/n)?')) if tog == 'n': x0 = np.hstack((setang1, pkmult1, setang2, pkmult2, sf, pkwid, tbgd, pkwid2, relsf, delam, L2)) print('discarding pars from previous run') - print(str(len(x0)) + ' parameters will be refined') + print((str(len(x0)) + ' parameters will be refined')) nvar = len(x0) - print('number of variables: ' + str(nvar)) - #nref1 = hkl1.shape[0] # unused variable - #nref2 = hkl2.shape[0] # unused variable + print(('number of variables: ' + str(nvar))) + # nref1 = hkl1.shape[0] # unused variable + # nref2 = hkl2.shape[0] # unused variable # need to apply correction in the case that pars from previous run had # fxsamediam==1 and current run also has fxsamediam==1 @@ -1096,8 +1097,8 @@ def FitTrans(): if fxsamediam == 1 and x0[neqv1 + neqv2 + 11] != 1: x0[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[3:4 + - neqv2 - 1] / x0[neqv1 + neqv2 + 11] - print('Diam 2 peak multipliers reset: ' + str(x0[neqv1 + neqv2 + 11])) + neqv2 - 1] / x0[neqv1 + neqv2 + 11] + print(('Diam 2 peak multipliers reset: ' + str(x0[neqv1 + neqv2 + 11]))) # check starting point @@ -1113,7 +1114,7 @@ def FitTrans(): plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.) 
plt.show() - print('Initial chi^2 is: ' + str(chi2)) + print(('Initial chi^2 is: ' + str(chi2))) showx3(x0) @@ -1125,7 +1126,7 @@ def FitTrans(): A[3:4 + neqv1 - 1] = -1.0 # pkmult1 Contrains intensities to be positive A[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2 *no constraint A[6 + neqv1:7 + neqv1 + neqv2 - 1] = -1.0 # pkmult2 - A[6 + neqv1 + neqv2] = -1.0 # sf Scale factor must be +ve + A[6 + neqv1 + neqv2] = -1.0 # sf Scale factor must be +ve A[7 + neqv1 + neqv2] = -1.0 # pkwid peak width must be +ve A[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0.0 # bgd *no constraint A[(neqv1 + neqv2 + 10)] = 0.0 # *no constraint @@ -1139,7 +1140,7 @@ def FitTrans(): Aeq[3:4 + neqv1 - 1] = 0.0 # pkmult1 Aeq[4 + neqv1 - 1:6 + neqv1] = 0.0 # setang2 Aeq[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.0 # pkmult2 - Aeq[6 + neqv1 + neqv2] = 0.0 # sf + Aeq[6 + neqv1 + neqv2] = 0.0 # sf Aeq[7 + neqv1 + neqv2] = 0.0 # pkwid Aeq[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = 0 # unfixed bgd Aeq[neqv1 + neqv2 + 10] = 0 @@ -1147,7 +1148,7 @@ def FitTrans(): Aeq[neqv1 + neqv2 + 12] = 0 Aeq[neqv1 + neqv2 + 13] = 0 - #beq = 0 # unused variable + # beq = 0 # unused variable # lower bounds lb = np.zeros(len(x0)) @@ -1155,7 +1156,7 @@ def FitTrans(): lb[3:4 + neqv1 - 1] = 0.5 # pkmult1 lb[4 + neqv1 - 1:6 + neqv1] = -10 # setang2 lb[6 + neqv1:7 + neqv1 + neqv2 - 1] = 0.5 # pkmult2 - lb[6 + neqv1 + neqv2] = 0.0 # sf + lb[6 + neqv1 + neqv2] = 0.0 # sf lb[7 + neqv1 + neqv2] = 0.0005 # pkwid lb[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [0.995, -0.0005] # bgd lb[neqv1 + neqv2 + 10] = 0.5e-4 # 2nd order pkwid @@ -1170,7 +1171,7 @@ def FitTrans(): ub[3:4 + neqv1 - 1] = 50 # pkmult1 ub[4 + neqv1 - 1:6 + neqv1] = 10 # setang2 ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = 50 # pkmult2 - ub[6 + neqv1 + neqv2] = 50 # sf + ub[6 + neqv1 + neqv2] = 50 # sf ub[7 + neqv1 + neqv2] = 0.01 # pkwid ub[neqv1 + neqv2 + 8:neqv1 + neqv2 + 9 + 2 - 1] = [1.005, 0.0005] # bgd ub[neqv1 + neqv2 + 10] = 1.0e-2 # 2nd order pkwid @@ -1206,12 +1207,12 @@ def FitTrans(): ub[6 + neqv1:7 + neqv1 + neqv2 - 1] = x0[6 + neqv1:7 + neqv1 + neqv2 - 1] + 0.01 - prompt = str(raw_input('Enter anything to begin refinement...')) + prompt = str(input('Enter anything to begin refinement...')) print('Refining...\nMight take quite a long time...') max_number_iterations = int( - raw_input('Maximum number of iterations for minimization: ')) - function_verbose = str(raw_input('Verbose minimization ([y]/n): ')) + input('Maximum number of iterations for minimization: ')) + function_verbose = str(input('Verbose minimization ([y]/n): ')) # make dictionary holding constraints for minimization # equalities (all must equal 0) and inequalities @@ -1222,8 +1223,8 @@ def FitTrans(): # bounds have to be list of tuples with (lower, upper) for each parameter bds = np.vstack((lb, ub)).T - res = sp.minimize(SimTrans3, x0, method='SLSQP', bounds=bds, constraints=cons, options={'disp': True, - 'maxiter': max_number_iterations}) + res = sp.minimize(SimTrans3, x0, method='SLSQP', bounds=bds, constraints=cons, + options={'disp': True, 'maxiter': max_number_iterations}) # tolerance limits to put in minimization if you want so : 'ftol': 0.001 @@ -1245,9 +1246,9 @@ def FitTrans(): # neqv1+neqv2+11 # x[neqv1+neqv2+11] x[6 + neqv1:7 + neqv1 + neqv2 - 1] = x[3:4 + - neqv2 - 1] * x[neqv1 + neqv2 + 11] - print('Diam 2 peak multipliers reset with factor: ' + - str(x[neqv1 + neqv2 + 11])) + neqv2 - 1] * x[neqv1 + neqv2 + 11] + print(('Diam 2 peak multipliers reset with factor: ' + + str(x[neqv1 + neqv2 + 11]))) 
else: # label ensuring I know that run did not use fxsamediam x[neqv1 + neqv2 + 11] = 1.0 @@ -1261,13 +1262,13 @@ def FitTrans(): # calculate chi2 for best fit chi2 = SimTrans3(x) - print('Final Chi2 = ' + str(chi2)) + print(('Final Chi2 = ' + str(chi2))) # determine output wavelength range using refined L2 value - #lamscale = x[neqv1 + neqv2 + 12] # unused variable + # lamscale = x[neqv1 + neqv2 + 12] # unused variable L2 = x[neqv1 + neqv2 + 13] - outlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF**2) + outlam = 0.0039558 * TOF / (L1 + L2) + difa * (TOF ** 2) fig_name_final = 'Final result ' + run_number plt.figure(fig_name_final) @@ -1290,15 +1291,16 @@ def FitTrans(): plt.ylabel('Transmission') plt.show() - prompt = str(raw_input('output best fit to file ([y]/n): ')) + prompt = str(input('output best fit to file ([y]/n): ')) if prompt == 'n': print('Ending') else: fitparname = str(run_number) + '.best_fit_pars3.dat' np.savetxt(fitparname, x, delimiter=',') - print('output parameters written to file: \n' + fitparname) + print(('output parameters written to file: \n' + fitparname)) ofilename = str(run_number) + '.fitted3.dat' SimTransOutput3(ofilename, x) # generate output file with fitted data + if __name__ == "__main__": FitTrans() diff --git a/scripts/HFIRPowderReduction/HfirPDReductionControl.py b/scripts/HFIRPowderReduction/HfirPDReductionControl.py index 67fecc766589ab8b61168384299fcb1f7da2731d..e826905c598351da8115edb8f4b9f314c8d72ab0 100644 --- a/scripts/HFIRPowderReduction/HfirPDReductionControl.py +++ b/scripts/HFIRPowderReduction/HfirPDReductionControl.py @@ -6,7 +6,9 @@ # ############################################################################ import os -import urllib.request, urllib.error, urllib.parse +import urllib.request +import urllib.error +import urllib.parse import math import numpy @@ -813,7 +815,7 @@ class HFIRPDRedControl(object): if excludeddetlist is None: excludeddetlist = [] else: - print("[DB] Excluded detectors: %s" % (excludeddetlist), "Convert to numpy array", \ + print("[DB] Excluded detectors: %s" % (excludeddetlist), "Convert to numpy array", numpy.array(excludeddetlist)) basewsname = datamdws.name().split("_DataMD")[0] diff --git a/scripts/Inelastic/Direct/ISISDirecInelasticConfig.py b/scripts/Inelastic/Direct/ISISDirecInelasticConfig.py index 0c31eac4ba1e11b981b7705939718ba159305da5..51dba73f12d11e3603b7c712d59467a76a123bc1 100644 --- a/scripts/Inelastic/Direct/ISISDirecInelasticConfig.py +++ b/scripts/Inelastic/Direct/ISISDirecInelasticConfig.py @@ -621,13 +621,13 @@ class MantidConfigDirectInelastic(object): if len(source) == 0: raise ValueError( '"replace" field of {0} file for instrument {1} has to contain attribute "var" and its value' - .format(self._user_files_descr, self._user.instrument)) + .format(self._user_files_descr, self._user.instrument)) # what should be placed instead of the replacement dest = repl_info.getAttribute("by_var") if len(dest) == 0: raise ValueError( '"replace" field of {0} file for instrument {1} has to contain attribute "by_var" and its value' - .format(self._user_files_descr, self._user.instrument)) + .format(self._user_files_descr, self._user.instrument)) # replace use-specific variables by their values if '$' in dest: diff --git a/scripts/Inelastic/Direct/PropertiesDescriptors.py b/scripts/Inelastic/Direct/PropertiesDescriptors.py index b8aaad9d8e6a1666013bf468c57b90f2a52e0530..d20b7334118dbbbf038ce966a6bc86f83b281c11 100644 --- a/scripts/Inelastic/Direct/PropertiesDescriptors.py +++ 
b/scripts/Inelastic/Direct/PropertiesDescriptors.py @@ -10,7 +10,7 @@ import math from collections import Iterable import mantid.simpleapi as mantid -from mantid import api, geometry, config +from mantid import api import Direct.ReductionHelpers as prop_helpers import collections @@ -663,9 +663,10 @@ class mon2NormalizationEnergyRange(PropDescriptor): val1 = float(en_range[0]) if val1 < 0.1 or val1 > 0.9: - message = "Lower mon2_norm_energy_range describes lower limit of energy to integrate neutron signal after the chopper.\n" \ - "The limit is defined as (this value)*incident_energy. Are you sure you want to set this_value to {0}?\n".format( - val1) + message = "Lower mon2_norm_energy_range describes lower limit of energy to integrate neutron signal after" \ + " the chopper.\nThe limit is defined as (this value)*incident_energy." \ + " Are you sure you want to set this_value to {0}?\n".format( + val1) if val1 > 1: return (False, 2, message) else: @@ -673,9 +674,10 @@ class mon2NormalizationEnergyRange(PropDescriptor): val2 = float(en_range[1]) if val2 < 1.1 or val2 > 1.9: - message = "Upper mon2_norm_energy_range describes upper limit of energy to integrate neutron signal after the chopper.\n" \ - "The limit is defined as (this value)*incident_energy. Are you sure you want to set this_value to {0}?\n".format( - val2) + message = "Upper mon2_norm_energy_range describes upper limit of energy to integrate neutron signal after" \ + " the chopper.\nThe limit is defined as (this value)*incident_energy." \ + " Are you sure you want to set this_value to {0}?\n".format( + val2) if val2 > 1: if result[0]: result = (False, 1, message) @@ -1050,7 +1052,9 @@ class MonovanIntegrationRange(prop_helpers.ComplexProperty): value = result if len(value) != 2: raise KeyError("monovan_integr_range has to be list of two values, " - "defining min/max values of integration range or None to use relative to incident energy limits") + "defining min/max values of integration range or None " + "to use relative to incident energy limits") + prop_helpers.ComplexProperty.__set__(self, tDict, value) def validate(self, instance, owner): @@ -1387,16 +1391,15 @@ class BackbgroundTestRange(PropDescriptor): """ validate background test range """ test_range = self.__get__(instance, owner) if test_range is None: - return (True, 0, '') + return True, 0, '' if test_range[0] >= test_range[1]: - return (False, 2, ' Background test range: [{0}:{1}] is incorrect '.format(test_range[0], test_range[1])) + return False, 2, ' Background test range: [{0}:{1}] is incorrect '.format(test_range[0], test_range[1]) if test_range[0] < 0: - return ( - False, 2, ' Background test range is TOF range, so it can not be negative={0}'.format(test_range[0])) + return False, 2, ' Background test range is TOF range, so it can not be negative={0}'.format(test_range[0]) if test_range[1] > 20000: - return (False, 1, ' Background test range is TOF range, its max value looks suspiciously big={0}'.format( - test_range[1])) - return (True, 0, '') + return False, 1, ' Background test range is TOF range, its max value looks suspiciously big={0}'.format( + test_range[1]) + return True, 0, '' # end BackbgroundTestRange diff --git a/scripts/Interface/reduction_gui/widgets/reflectometer/base_ref_reduction.py b/scripts/Interface/reduction_gui/widgets/reflectometer/base_ref_reduction.py index f21b87c61e6dff18176026764eb3d6e49aa482a9..1116c5b1cd9289691de4c4abf961b0012794f6fc 100644 --- a/scripts/Interface/reduction_gui/widgets/reflectometer/base_ref_reduction.py +++ 
b/scripts/Interface/reduction_gui/widgets/reflectometer/base_ref_reduction.py @@ -515,11 +515,7 @@ class BaseRefWidget(BaseWidget): data_y = mantid.mtd[ws].dataY(0) # cleanup data 0-> NAN - for y_val in data_y: - # print '-> data_y[j]: ' , data_y[j] , ' data_e[j]: ' , data_y[j] - if y_val < 1e-12: - _y_val = np.nan - + data_y = [np.nan if y_val < 1e-12 else y_val for y_val in data_y] _file_number += 1 # END OF DEBUGGING ONLY diff --git a/scripts/MantidIPython/__init__.py b/scripts/MantidIPython/__init__.py index 4ede76d88d070c30442eb2eeda0ed3d9098acbde..d5a758b524675515f6ed2fbb7741e2279c58559f 100644 --- a/scripts/MantidIPython/__init__.py +++ b/scripts/MantidIPython/__init__.py @@ -21,10 +21,8 @@ Some tools for use in ipython notebooks generated by Mantid. """ +# Suppress warnings about unused import as these +# imports are important for iPython from MantidIPython.plot_functions import * -import warnings -import mantid.kernel - -# Check if the version of Mantid being used matches the version which created the notebook. -if "3.5.20160108.1509" != mantid.kernel.version_str(): - warnings.warn("Version of Mantid being used does not match version which created the notebook.") +import warnings # noqa: F401 +import mantid.kernel # noqa: F401 diff --git a/scripts/reducer_singleton.py b/scripts/reducer_singleton.py index c22114225429a55453059a77de500a956ae799e9..b0fdb5d7bc21e06a069041a3246466f3fe7cc738 100644 --- a/scripts/reducer_singleton.py +++ b/scripts/reducer_singleton.py @@ -1,8 +1,9 @@ -#pylint: disable=invalid-name +# pylint: disable=invalid-name import random import string import os import mantid +import time from isis_instrument import BaseInstrument @@ -11,6 +12,7 @@ class ReductionStep(object): """ Base class for reduction steps """ + @classmethod def delete_workspaces(cls, workspace): """ @@ -25,8 +27,9 @@ class ReductionStep(object): """ Generate a unique name for an internal workspace """ - random_str = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5)) - return "__"+descriptor+"_"+extract_workspace_name(filepath)+"_"+random_str + random_str = ''.join( + random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5)) + return "__" + descriptor + "_" + os.path.basename(filepath) + "_" + random_str def execute(self, reducer, inputworkspace=None, outputworkspace=None): """ @@ -64,14 +67,16 @@ class Reducer(object): output_workspaces = [] def __init__(self): - self.UID = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5)) + self.UID = ''.join( + random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5)) self._reduction_steps = [] def set_instrument(self, configuration): if issubclass(configuration.__class__, BaseInstrument): self.instrument = configuration else: - raise RuntimeError("Reducer.set_instrument expects an %s object, found %s" % (Instrument, configuration.__class__)) + raise RuntimeError( + "Reducer.set_instrument expects an %s object, found %s" % (BaseInstrument, configuration.__class__)) def set_data_path(self, path): """ @@ -131,7 +136,7 @@ class Reducer(object): self.pre_process() # Go through the list of files to be reduced - #for file_ws in self._data_files: + # for file_ws in self._data_files: # for item in self._reduction_steps: # try: # result = item.execute(self, file_ws) @@ -141,7 +146,7 @@ class Reducer(object): # self.log_text += "\n%s\n" % sys.exc_value # raise - #any clean up, 
possibly removing workspaces + # any clean up, possibly removing workspaces self.post_process() # Determine which directory to use @@ -152,8 +157,8 @@ class Reducer(object): else: output_dir = os.path.expanduser('~') - self.log_text += "Reduction completed in %g sec\n" % (time.time()-t_0) - log_path = os.path.join(output_dir,"%s_reduction.log" % instrument_name) + self.log_text += "Reduction completed in %g sec\n" % (time.time() - t_0) + log_path = os.path.join(output_dir, "%s_reduction.log" % instrument_name) self.log_text += "Log saved to %s" % log_path # Write the log to file diff --git a/scripts/reduction/reducer.py b/scripts/reduction/reducer.py index 383d3763fb79450591d1f73830240f50479aa017..0854667337f60305e66c3d5a3275ab5abea4e606 100644 --- a/scripts/reduction/reducer.py +++ b/scripts/reduction/reducer.py @@ -24,12 +24,11 @@ import os import sys import time import types +import uuid from reduction.instrument import Instrument -import mantid -from mantid import simpleapi +import mantid.simpleapi as mantid import warnings import inspect -import random from reduction.find_data import find_data @@ -79,7 +78,7 @@ def validate_loader(func): if data_file is None: return else: - raise RuntimeError("SANSReductionSteps.LoadRun doesn't recognize workspace handle %s" % workspace) + raise RuntimeError("SANSReductionSteps.LoadRun doesn't recognize workspace handle %s" % inputworkspace) else: data_file = self._data_file @@ -107,7 +106,7 @@ def validate_loader(func): kwargs[kwargs["AlternateName"]] = data_file self.algorithm = alg - simpleapi.set_properties(alg, *(), **kwargs) + mantid.set_properties(alg, *(), **kwargs) alg.execute() if "OutputMessage" in propertyOrder: return alg.getPropertyValue("OutputMessage") @@ -146,7 +145,7 @@ def validate_loader(func): if data_file is None: return else: - raise RuntimeError("SANSReductionSteps.LoadRun doesn't recognize workspace handle %s" % workspace) + raise RuntimeError("SANSReductionSteps.LoadRun doesn't recognize workspace handle %s" % inputworkspace) else: data_file = self._data_file @@ -255,7 +254,7 @@ def validate_step(func): kwargs["OutputWorkspace"] = outputworkspace self.algorithm = alg - simpleapi.set_properties(alg, *(), **kwargs) + mantid.set_properties(alg, *(), **kwargs) alg.execute() if "OutputMessage" in propertyOrder: return alg.getPropertyValue("OutputMessage") @@ -335,8 +334,8 @@ class Reducer(object): output_workspaces = [] def __init__(self): - self.UID = ''.join( - random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for x in range(5)) + # Generate UUID and trim to 5 chars + self.UID = str(uuid.uuid1())[:5] self.property_manager = "__reduction_parameters_" + self.UID self._data_files = {} self._reduction_steps = [] @@ -366,8 +365,8 @@ class Reducer(object): Removes all workspace flagged as dirty, use when a reduction aborts with errors """ for bad_data in self._dirty: - if bad_data in mtd: - simpleapi.DeleteWorkspace(Workspace=bad_data) + if bad_data in mantid.mtd: + mantid.DeleteWorkspace(Workspace=bad_data) else: mantid.logger.notice('reducer: Could not access tainted workspace ' + bad_data) @@ -454,7 +453,7 @@ class Reducer(object): TODO: this needs to be an ordered list """ if data_file is None: - if workspace in mtd: + if workspace in mantid.mtd: self._data_files[workspace] = None return else: @@ -555,8 +554,7 @@ class ReductionStep(object): """ Generate a unique name for an internal workspace """ - random_str = ''.join( - random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) 
for x in range(5)) + random_str = str(uuid.uuid1())[:5] return "__" + descriptor + "_" + extract_workspace_name(filepath) + "_" + random_str def execute(self, reducer, inputworkspace=None, outputworkspace=None): diff --git a/scripts/reduction_workflow/command_interface.py b/scripts/reduction_workflow/command_interface.py index c75684cba6d4036d55f89bcff1ddd3bcd5471621..6e53918ddba3b4f8b2d5a95dd29e1690e0dc98a3 100644 --- a/scripts/reduction_workflow/command_interface.py +++ b/scripts/reduction_workflow/command_interface.py @@ -5,11 +5,9 @@ from reduction_workflow.reducer import Reducer - class ReductionSingleton(object): """ Singleton reduction class """ - - ## storage for the instance reference + # storage for the instance reference __instance = None def __init__(self): @@ -66,7 +64,8 @@ class ReductionSingleton(object): def get_property_manager(name): - prop_mng = PropertyManagerDataService.retrieve(name) + # prop_mng = mantid.PropertyManagerDataService.retrieve(name) + pass ## List of user commands ###################################################### diff --git a/scripts/reduction_workflow/instruments/sans/hfir_command_interface.py b/scripts/reduction_workflow/instruments/sans/hfir_command_interface.py index a00031304af16f927baa590a57cff84c0e74fc85..c858994295cad12b31ac7473d8e4306df0ce6d20 100644 --- a/scripts/reduction_workflow/instruments/sans/hfir_command_interface.py +++ b/scripts/reduction_workflow/instruments/sans/hfir_command_interface.py @@ -8,7 +8,7 @@ List of common user commands for HFIR SANS import os.path import mantid -from reduction_workflow.command_interface import ReductionSingleton, Clear, OutputPath, Reduce1D, Reduce, AppendDataFile, ClearDataFiles +from reduction_workflow.command_interface import ReductionSingleton, Clear from reduction_workflow.find_data import find_data from reduction_workflow.instruments.sans import hfir_instrument diff --git a/scripts/reduction_workflow/instruments/sans/sns_command_interface.py b/scripts/reduction_workflow/instruments/sans/sns_command_interface.py index 5adfdacd89f0fe096406df999f7433f1ec0c2a1b..faa594255a283c64210f02d3482638273827bcc5 100644 --- a/scripts/reduction_workflow/instruments/sans/sns_command_interface.py +++ b/scripts/reduction_workflow/instruments/sans/sns_command_interface.py @@ -5,35 +5,9 @@ # Import the specific commands that we need - some of these are used in systemtests from reduction_workflow.command_interface import * -from hfir_command_interface import DarkCurrent, NoDarkCurrent, NoNormalization -from hfir_command_interface import SolidAngle, NoSolidAngle -from hfir_command_interface import DirectBeamCenter, ScatteringBeamCenter +from hfir_command_interface import SolidAngle from hfir_command_interface import SetBeamCenter as BaseSetBeamCenter -from hfir_command_interface import SensitivityCorrection, SetSensitivityBeamCenter -from hfir_command_interface import SensitivityDirectBeamCenter, SensitivityScatteringBeamCenter -from hfir_command_interface import NoSensitivityCorrection, DivideByThickness - -from hfir_command_interface import IQxQy, NoIQxQy, SaveIq, NoSaveIq, SaveIqAscii - -from hfir_command_interface import DirectBeamTransmission, TransmissionDarkCurrent -from hfir_command_interface import ThetaDependentTransmission -from hfir_command_interface import SetTransmissionBeamCenter, TransmissionDirectBeamCenter -from hfir_command_interface import SetTransmission, NoTransmission - -from hfir_command_interface import Background, NoBackground, NoBckTransmission -from hfir_command_interface import 
SetBckTransmission, BckDirectBeamTransmission -from hfir_command_interface import SetBckTransmissionBeamCenter, BckThetaDependentTransmission -from hfir_command_interface import BckTransmissionDirectBeamCenter, BckTransmissionDarkCurrent - -from hfir_command_interface import SetSampleDetectorOffset, SetSampleDetectorDistance -from hfir_command_interface import Mask, MaskRectangle, MaskDetectors, MaskDetectorSide -from hfir_command_interface import SetAbsoluteScale, SetDirectBeamAbsoluteScale -from hfir_command_interface import Stitch - -#from mantid.api import AlgorithmManager -#from mantid.kernel import Logger -#import mantid.simpleapi as simpleapi from reduction_workflow.find_data import find_data
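
Two of the behavioural changes in this patch are easy to miss among the Python 3 / PEP 8 churn, so short sketches of each follow. These are illustrative notes on the patch, not part of it, and the concrete values and names used below are made up for demonstration.

The first concerns `base_ref_reduction.py`. The removed loop assigned to a throwaway name (`_y_val`), so it never altered `data_y`; the replacement list comprehension is what actually maps near-zero values to NaN. A minimal sketch, assuming numpy and using fabricated sample values in place of the real `mantid.mtd[ws].dataY(0)` array:

```python
import numpy as np

# Illustrative values only; the real data_y comes from mantid.mtd[ws].dataY(0).
data_y = [0.0, 5.0e-13, 1.2]

# Old pattern: rebinding the loop variable never touches the list.
for y_val in data_y:
    if y_val < 1e-12:
        y_val = np.nan  # rebinds the local name only; data_y is unchanged

print(data_y)  # [0.0, 5e-13, 1.2] -- still no NaNs

# Pattern introduced by the patch: build a new list instead.
data_y = [np.nan if y_val < 1e-12 else y_val for y_val in data_y]
print(data_y)  # [nan, nan, 1.2]
```

The second concerns the unique-workspace-name generation in `reduction/reducer.py`, where the `random.choice` string join is replaced with the first five characters of `uuid.uuid1()` (while `reducer_singleton.py` keeps the random join but swaps `extract_workspace_name` for `os.path.basename`). A hypothetical standalone helper combining both patched pieces, purely to show the shape of the result:

```python
import os
import uuid


def create_unique_name(filepath, descriptor):
    # Short uuid1() fragment in place of the old random.choice() join.
    random_str = str(uuid.uuid1())[:5]
    return "__" + descriptor + "_" + os.path.basename(filepath) + "_" + random_str


print(create_unique_name("/data/SANS2D00012345.nxs", "load"))
# e.g. __load_SANS2D00012345.nxs_1a2b3
```

One design note worth flagging for review: the leading characters of `uuid1()` are timestamp-derived, so names generated in very quick succession are more likely to share a prefix than with the previous five random alphanumeric characters; whether that matters here depends on how often these internal names are minted within a single reduction.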