diff --git a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/RefLReduction.py b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/RefLReduction.py
index 3f2140113243ab525b14a21b7698dcd43677e575..8123bdf230eda1c0779724dc313c0910cf6b975f 100644
--- a/Code/Mantid/Framework/PythonInterface/plugins/algorithms/RefLReduction.py
+++ b/Code/Mantid/Framework/PythonInterface/plugins/algorithms/RefLReduction.py
@@ -177,7 +177,10 @@ class RefLReduction(PythonAlgorithm):
         [dMD, dSD] = wks_utility.getDistances(ws_event_data)
         # get theta
         theta = wks_utility.getTheta(ws_event_data, angleOffsetDeg)
-                
+        # get proton charge; 1/pc is the error assigned to zero-error bins during background subtraction
+        pc = wks_utility.getProtonCharge(ws_event_data)
+        error_0 = 1. / pc
+
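The reciprocal of the proton charge is carried through the reduction as error_0, the uncertainty assigned to bins whose recorded error is exactly zero. The sketch below illustrates the underlying reasoning with purely illustrative numbers; it assumes the convention that an empty bin is treated as carrying roughly a one-count uncertainty once the counts have been normalized by proton charge.

# Illustrative only: why 1/pc is a plausible error floor for an empty bin.
pc = 12.5                 # hypothetical proton charge value
counts = 0.0              # a bin that recorded nothing has value 0 and stored error 0
normalized = counts / pc  # still 0.0 after proton-charge normalization
error_0 = 1. / pc         # roughly the error of a single count after the same normalization;
                          # weightedMean() substitutes this for zero errors downstream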
         # rebin data
         ws_histo_data = wks_utility.rebinNeXus(ws_event_data,
                               [binTOFrange[0], binTOFsteps, binTOFrange[1]],
@@ -194,7 +197,7 @@ class RefLReduction(PythonAlgorithm):
                                       TOFrange[0],
                                       TOFrange[1],
                                       'data')
-
+        
         # normalize by current proton charge
         ws_histo_data = wks_utility.normalizeNeXus(ws_histo_data, 'data')
         
@@ -203,6 +206,12 @@ class RefLReduction(PythonAlgorithm):
                                                             dataLowResRange,
                                                             'data')
 
+#        #DEBUG ONLY
+#        wks_utility.ouput_big_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/data_file_after_low_resolution_integration.txt',
+#                                         data_tof_axis,
+#                                         data_y_axis,
+#                                         data_y_error_axis)
+        
         tof_axis = data_tof_axis[0:-1].copy()
         tof_axis_full = data_tof_axis.copy()
 
@@ -216,13 +225,23 @@ class RefLReduction(PythonAlgorithm):
                                                                            dataPeakRange,
                                                                            dataBackFlag,
                                                                            dataBackRange,
+                                                                           error_0, 
                                                                            'data')
-                
+#        #DEBUG ONLY
+#        wks_utility.ouput_big_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/data_file_back_sub_not_integrated.txt',
+#                                         data_tof_axis,
+#                                         data_y_axis,
+#                                         data_y_error_axis)
+
         # work with normalization        
         
         # load normalization
         ws_event_norm = wks_utility.loadNeXus(int(normalizationRunNumber), 'normalization')        
         
+        # get proton charge of the normalization run and recompute error_0 from it
+        pc = wks_utility.getProtonCharge(ws_event_norm)
+        error_0 = 1. / pc
+
         # rebin normalization
         ws_histo_norm = wks_utility.rebinNeXus(ws_event_norm,
                               [binTOFrange[0], binTOFsteps, binTOFrange[1]],
@@ -249,16 +268,29 @@ class RefLReduction(PythonAlgorithm):
                                                         normPeakRange,
                                                         normBackFlag,
                                                         normBackRange,
+                                                        error_0, 
                                                         'normalization') 
 
         [av_norm, av_norm_error] = wks_utility.fullSumWithError(norm_y_axis, 
                                                            norm_y_error_axis)
 
+#        ## DEBUGGING ONLY
+#        wks_utility.ouput_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/norm_file_back_sub_not_integrated.txt',
+#                                     norm_tof_axis, 
+#                                     av_norm, 
+#                                     av_norm_error)        
+
         [final_data_y_axis, final_data_y_error_axis] = wks_utility.divideDataByNormalization(data_y_axis,
                                                                                              data_y_error_axis,
                                                                                              av_norm,
                                                                                              av_norm_error)        
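The division above combines the data and the averaged normalization together with their uncertainties. As a reference, the standard first-order error propagation for a ratio r = d/n is sketched below with illustrative names; it is not necessarily the exact expression implemented inside divideDataByNormalization.

import numpy as np

def divide_with_errors(d, d_err, n, n_err):
    # r = d / n with first-order (Gaussian) error propagation:
    # sigma_r**2 = (d_err / n)**2 + (d * n_err / n**2)**2
    r = d / n
    r_err = np.sqrt((d_err / n)**2 + (d * n_err / n**2)**2)
    return r, r_err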
         
+#        #DEBUG ONLY
+#        wks_utility.ouput_big_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/data_divided_by_norm_not_integrated.txt',
+#                                         data_tof_axis,
+#                                         final_data_y_axis,
+#                                         final_data_y_error_axis)
+        
         # apply Scaling factor    
         [tof_axis_full, y_axis, y_error_axis] = wks_utility.applyScalingFactor(tof_axis_full, 
                                                                                final_data_y_axis, 
@@ -268,6 +300,11 @@ class RefLReduction(PythonAlgorithm):
                                                                                slitsValuePrecision,
                                                                                slitsWidthFlag)
         
+#        #DEBUG ONLY
+#        wks_utility.ouput_big_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/after_applying_scaling_factor.txt',
+#                                         data_tof_axis,
+#                                         y_axis,
+#                                         y_error_axis)
         
         if geometryCorrectionFlag: # convert To Q with correction
             [q_axis, y_axis, y_error_axis] = wks_utility.convertToQ(tof_axis_full,
@@ -294,6 +331,11 @@ class RefLReduction(PythonAlgorithm):
                                                                                      first_slit_size = first_slit_size,
                                                                                      last_slit_size = last_slit_size)
 
+            
+#            wks_utility.ouput_big_Q_ascii_file('/mnt/hgfs/j35/Matlab/DebugMantid/Strange0ValuesToData/after_conversion_to_q.txt',
+#                                         q_axis,
+#                                         y_axis,
+#                                         y_error_axis)
          
         sz = q_axis.shape
         nbr_pixel = sz[0]
diff --git a/Code/Mantid/scripts/reduction/instruments/reflectometer/wks_utility.py b/Code/Mantid/scripts/reduction/instruments/reflectometer/wks_utility.py
index d54d9b0d2e7f79eb0630aa125c418a300171683b..f64e2399942d2d5616fc404b1790cc4746ce12f6 100644
--- a/Code/Mantid/scripts/reduction/instruments/reflectometer/wks_utility.py
+++ b/Code/Mantid/scripts/reduction/instruments/reflectometer/wks_utility.py
@@ -1,4 +1,4 @@
-from numpy import zeros, arctan2, arange, shape, sqrt, fliplr, asfarray, where, mean, sum
+from numpy import zeros, ones, arctan2, arange, shape, sqrt, fliplr, asfarray, where, mean, sum
 from mantid.simpleapi import *
 # from MantidFramework import *
 import math
@@ -40,8 +40,8 @@ def getProtonCharge(st=None):
     if st is not None:
         mt_run = st.getRun()
         proton_charge_mtd_unit = mt_run.getProperty('gd_prtn_chrg').value
-        proton_charge = proton_charge_mtd_unit / 2.77777778e-10
-        return proton_charge
+#        proton_charge = proton_charge_mtd_unit / 2.77777778e-10
+        return proton_charge_mtd_unit
     return None  
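The factor removed here, 2.77777778e-10, equals 1/(3.6e9) and corresponds to converting micro-amp-hours to picocoulombs (1 micro-amp-hour = 3.6e9 pC), so getProtonCharge now appears to return the gd_prtn_chrg log in Mantid's native unit instead of a converted value. A small sketch of how the two readings relate, assuming a loaded REF_L event workspace; the file name is illustrative.

from mantid.simpleapi import LoadEventNexus

ws = LoadEventNexus(Filename='REF_L_66420_event.nxs')      # illustrative run/file name
pc_native = ws.getRun().getProperty('gd_prtn_chrg').value  # value returned by the new code
pc_pC = pc_native / 2.77777778e-10                         # value the old code used to return
error_0 = 1. / pc_native                                   # error floor derived from the native value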
 
 def getIndex(value, array):
@@ -1048,7 +1048,7 @@ def integrateOverLowResRange(mt1,
 
 def substractBackground(tof_axis, y_axis, y_error_axis,
                         peakRange, backFlag, backRange,
-                        type):
+                        error_0, type):
     """
     shape of y_axis : [256, nbr_tof]
     This routine will calculate the background, remove it from the peak
@@ -1072,11 +1072,7 @@ def substractBackground(tof_axis, y_axis, y_error_axis,
     # retrieve data
     _tofAxis = tof_axis
     nbrTof = len(_tofAxis)
-    
-    # by default, no space for background subtraction below and above peak
-    bMinBack = False
-    bMaxBack = False
-        
+            
     # size peak
     szPeak = peakMax - peakMin + 1
     
@@ -1094,19 +1090,22 @@ def substractBackground(tof_axis, y_axis, y_error_axis,
 
     for t in range(nbrTof):
 
+        # by default, assume no usable background region below or above the peak
+        bMinBack = False
+        bMaxBack = False
+
         if backMin < (peakMin):
             bMinBack = True
             _backMinArray = y_axis[backMin:peakMin, t]
             _backMinErrorArray = y_error_axis[backMin:peakMin, t]
             [_backMin, _backMinError] = weightedMean(_backMinArray,
-                                                          _backMinErrorArray)
+                                                          _backMinErrorArray, error_0)
              
         if (peakMax) < backMax:
             bMaxBack = True
             _backMaxArray = y_axis[peakMax+1:backMax+1, t]
-
             _backMaxErrorArray = y_error_axis[peakMax+1:backMax+1, t]
-            [_backMax, _backMaxError] = weightedMean(_backMaxArray, _backMaxErrorArray)    
+            [_backMax, _backMaxError] = weightedMean(_backMaxArray, _backMaxErrorArray, error_0)    
     
         # if no max background use min background
         if not bMaxBack:
@@ -1119,7 +1118,7 @@ def substractBackground(tof_axis, y_axis, y_error_axis,
             background_error = _backMaxError
             
         if bMinBack and bMaxBack:
-            [background, background_error] = weightedMean([_backMin, _backMax], [_backMinError, _backMaxError])
+            [background, background_error] = weightedMean([_backMin, _backMax], [_backMinError, _backMaxError], error_0)
         
         # remove background for each pixel of the peak
         for x in range(szPeak):
@@ -1131,32 +1130,35 @@ def substractBackground(tof_axis, y_axis, y_error_axis,
     
     return [final_y_axis, final_y_error_axis]
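A condensed sketch of the per-TOF background estimate performed by the loop above, using this module's weightedMean with the new error_0 argument. The helper name is illustrative and the code is not a drop-in replacement; it only shows the structure: a weighted mean of the region below the peak, one of the region above, and a final weighted mean combining the two when both exist.

def estimate_background_per_tof(y_axis, y_error_axis,
                                peakMin, peakMax, backMin, backMax,
                                nbrTof, error_0):
    backgrounds = []
    for t in range(nbrTof):
        estimates = []
        if backMin < peakMin:    # background region available below the peak
            estimates.append(weightedMean(y_axis[backMin:peakMin, t],
                                          y_error_axis[backMin:peakMin, t], error_0))
        if peakMax < backMax:    # background region available above the peak
            estimates.append(weightedMean(y_axis[peakMax+1:backMax+1, t],
                                          y_error_axis[peakMax+1:backMax+1, t], error_0))
        if len(estimates) == 2:  # combine both regions with another weighted mean
            values, errors = zip(*estimates)
            estimates = [weightedMean(list(values), list(errors), error_0)]
        backgrounds.append(estimates[0] if estimates else [0., 0.])
    return backgrounds           # one [background, background_error] pair per TOF bin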
 
-def weightedMean(data_array, error_array):
+def weightedMean(data_array, error_array, error_0):
     
     sz = len(data_array)
     
     # calculate the numerator of mean
     dataNum = 0;
     for i in range(sz):
-        if not (data_array[i] == 0):
-            tmpFactor = float(data_array[i]) / float((pow(error_array[i],2)))
-            dataNum += tmpFactor
+        if (error_array[i] == 0):  # substitute the proton-charge based error for empty bins
+            error_array[i] = error_0
+            
+        tmpFactor = float(data_array[i]) / float((pow(error_array[i],2)))
+        dataNum += tmpFactor
     
     # calculate denominator
     dataDen = 0;
     for i in range(sz):
-        if not (error_array[i] == 0):
-            tmpFactor = 1./float((pow(error_array[i],2)))
-            dataDen += tmpFactor
+        if (error_array[i] == 0):  # zeros were already replaced in the loop above
+            error_array[i] = error_0
+        tmpFactor = 1./float((pow(error_array[i],2)))
+        dataDen += tmpFactor
 
     if dataDen == 0:
-        mean = 0
+        data_mean = 0
         mean_error = 0
     else:            
-        mean = float(dataNum) / float(dataDen)
+        data_mean = float(dataNum) / float(dataDen)
         mean_error = math.sqrt(1/dataDen)     
 
-    return [mean, mean_error]
+    return [data_mean, mean_error]
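The updated weightedMean no longer skips zero-valued entries; it substitutes error_0 for any zero error so that every bin contributes to the inverse-variance average. A compact numpy equivalent is sketched below for reference (the name and the copy of the error array are illustrative choices; the loop version above is what the module actually uses, and it modifies error_array in place).

import numpy as np

def weighted_mean(data, errors, error_0):
    data = np.asarray(data, dtype=float)
    errors = np.asarray(errors, dtype=float).copy()  # work on a copy instead of mutating the caller's array
    errors[errors == 0] = error_0                    # same substitution as the loops above
    weights = 1. / errors**2                         # inverse-variance weights
    denom = weights.sum()
    if denom == 0:
        return [0., 0.]
    return [float((data * weights).sum() / denom), float(np.sqrt(1. / denom))]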
 
 def weightedMeanOfRange(norm_y_axis, norm_y_error_axis):
     """
@@ -1325,11 +1327,6 @@ def ouput_big_Q_ascii_file(file_name,
                          y_axis,
                          y_error_axis):
       
-    print x_axis.shape
-    print y_axis.shape
-    print y_error_axis.shape
-      
-      
     f=open(file_name,'w')
       
     sz = y_axis.shape # (nbr_pixel, nbr_tof)