diff --git a/Framework/PythonInterface/plugins/algorithms/Abins.py b/Framework/PythonInterface/plugins/algorithms/Abins.py
index e085f6c2dc84589160963b603fe61ddc0ada4516..5f7dda7c45cdb5e328ef94dbfcc9778fd5c30e67 100644
--- a/Framework/PythonInterface/plugins/algorithms/Abins.py
+++ b/Framework/PythonInterface/plugins/algorithms/Abins.py
@@ -186,10 +186,8 @@ class Abins(PythonAlgorithm):
         prog_reporter.report("Input data from the user has been collected.")
 
         # 2) read ab initio data
-        ab_initio_loaders = {"CASTEP": AbinsModules.LoadCASTEP, "CRYSTAL": AbinsModules.LoadCRYSTAL,
-                             "DMOL3": AbinsModules.LoadDMOL3, "GAUSSIAN": AbinsModules.LoadGAUSSIAN}
-        rdr = ab_initio_loaders[self._ab_initio_program](input_ab_initio_filename=self._vibrational_or_phonon_data_file)
-        ab_initio_data = rdr.get_formatted_data()
+        ab_initio_data = AbinsModules.AbinsData.from_calculation_data(self._vibrational_or_phonon_data_file,
+                                                                      self._ab_initio_program)
         prog_reporter.report("Vibrational/phonon data has been read.")
 
         # 3) calculate S
@@ -737,10 +735,6 @@ class Abins(PythonAlgorithm):
         Checks rebinning parameters.
         :param message_end: closing part of the error message.
         """
-        pkt_per_peak = AbinsParameters.sampling['pkt_per_peak']
-        if not (isinstance(pkt_per_peak, six.integer_types) and 1 <= pkt_per_peak <= 1000):
-            raise RuntimeError("Invalid value of pkt_per_peak" + message_end)
-
         min_wavenumber = AbinsParameters.sampling['min_wavenumber']
         if not (isinstance(min_wavenumber, float) and min_wavenumber >= 0.0):
             raise RuntimeError("Invalid value of min_wavenumber" + message_end)
@@ -935,13 +929,13 @@ class Abins(PythonAlgorithm):
         self._out_ws_name = self.getPropertyValue('OutputWorkspace')
         self._calc_partial = (len(self._atoms) > 0)
 
-        # user defined interval is exclusive with respect to
+        # Sampling mesh is determined by
         # AbinsModules.AbinsParameters.sampling['min_wavenumber']
         # AbinsModules.AbinsParameters.sampling['max_wavenumber']
-        # with bin width AbinsModules.AbinsParameters.sampling['bin_width']
+        # and AbinsModules.AbinsParameters.sampling['bin_width']
         step = self._bin_width
-        start = AbinsParameters.sampling['min_wavenumber'] + step / 2.0
-        stop = AbinsParameters.sampling['max_wavenumber'] + step / 2.0
+        start = AbinsParameters.sampling['min_wavenumber']
+        stop = AbinsParameters.sampling['max_wavenumber'] + step
         self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
 
 
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py
index ecfe1d48a3c983519fc047b0fa01cc3b4c193ffa..f64311c81f4400937152e8123f3a13c880663196 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/AbinsAdvancedParametersTest.py
@@ -44,6 +44,7 @@ class AbinsAdvancedParametersTest(unittest.TestCase):
             "pkt_per_peak": 50,
             "max_wavenumber": 4100.0,
             "min_wavenumber": 0.0,
+            "broadening_scheme": "auto",
             "frequencies_threshold": 0.0,
             "s_relative_threshold": 0.001,
             "s_absolute_threshold": 1e-7}
diff --git a/Testing/Data/SystemTest/BenzeneBinWidthCASTEP.nxs.md5 b/Testing/Data/SystemTest/BenzeneBinWidthCASTEP.nxs.md5
index 949f9d5f2af3e83b1e89d822ef2eb3a0df69e725..2894a7a5302ff03cd1c872e49b07f5d72b8a4563 100644
--- a/Testing/Data/SystemTest/BenzeneBinWidthCASTEP.nxs.md5
+++ b/Testing/Data/SystemTest/BenzeneBinWidthCASTEP.nxs.md5
@@ -1 +1 @@
-9f52c317a2ed01f26514b539139df5e6
+8c4c9164cbd6256b8fe558a8cc5aab2f
diff --git a/Testing/Data/SystemTest/C6H5Cl-Gaussian.nxs.md5 b/Testing/Data/SystemTest/C6H5Cl-Gaussian.nxs.md5
index e536bb4ffd8ce11e9dabbf3d9e0ebdf562ba1a41..f1e705becf4e102b0739c5c63ce4866db79d13b1 100644
--- a/Testing/Data/SystemTest/C6H5Cl-Gaussian.nxs.md5
+++ b/Testing/Data/SystemTest/C6H5Cl-Gaussian.nxs.md5
@@ -1 +1 @@
-f1f62461da1f6153030a035259e45467
+426766659eb52b2cdca5f4c452269af6
diff --git a/Testing/Data/SystemTest/Crystalb3lypScratchAbins.nxs.md5 b/Testing/Data/SystemTest/Crystalb3lypScratchAbins.nxs.md5
index 167594df65ab6078343181770947c26d8977d7fe..a0d98fe4b09cffb5488c18105b9027db37d5f7b9 100644
--- a/Testing/Data/SystemTest/Crystalb3lypScratchAbins.nxs.md5
+++ b/Testing/Data/SystemTest/Crystalb3lypScratchAbins.nxs.md5
@@ -1 +1 @@
-388422e55d7804eba97f0c9e62e1bf1a
+a9184f96367d95daf4e4b21368a7b18f
diff --git a/Testing/Data/SystemTest/LiOH_H2O_2D2O_CASTEP.nxs.md5 b/Testing/Data/SystemTest/LiOH_H2O_2D2O_CASTEP.nxs.md5
index 6ef1265575d0c6645cbb282b5a6c7a5d8f5ecabb..5956bfd094e88d5fcad42ad7746e9775f1224bea 100644
--- a/Testing/Data/SystemTest/LiOH_H2O_2D2O_CASTEP.nxs.md5
+++ b/Testing/Data/SystemTest/LiOH_H2O_2D2O_CASTEP.nxs.md5
@@ -1 +1 @@
-6ec344e29194b881cfc567a566135818
+19a30c2a011dfd8553cb2673d32b8446
diff --git a/Testing/Data/SystemTest/Mapi.nxs.md5 b/Testing/Data/SystemTest/Mapi.nxs.md5
index b8e145e075e4dac86e18cfd722fe6a580ed4733a..e50d1cd23342658649c99602eb4eff419f0ac0e2 100644
--- a/Testing/Data/SystemTest/Mapi.nxs.md5
+++ b/Testing/Data/SystemTest/Mapi.nxs.md5
@@ -1 +1 @@
-c308b28f6a48285ce7a9bc24fc34829f
+bb780cfae9584657a059862dcfbb65ee
diff --git a/Testing/Data/SystemTest/Na2SiF6.nxs.md5 b/Testing/Data/SystemTest/Na2SiF6.nxs.md5
deleted file mode 100644
index 616c744a04dd978666324982220990c82678cabb..0000000000000000000000000000000000000000
--- a/Testing/Data/SystemTest/Na2SiF6.nxs.md5
+++ /dev/null
@@ -1 +0,0 @@
-f17d1f28923a94a1b097806d044321e2
diff --git a/Testing/Data/SystemTest/Na2SiF6_CASTEP.nxs.md5 b/Testing/Data/SystemTest/Na2SiF6_CASTEP.nxs.md5
index 305960b322405e9ba3a95424658c01271b135479..406a60d0ad2b5df99d2273ebe8444bf26f6d7c35 100644
--- a/Testing/Data/SystemTest/Na2SiF6_CASTEP.nxs.md5
+++ b/Testing/Data/SystemTest/Na2SiF6_CASTEP.nxs.md5
@@ -1 +1 @@
-ee9b47071de97a4712bf904a2d8019fd
+6a9ebb184ce6cbad194d2f4217e8e6c2
diff --git a/Testing/Data/SystemTest/Na2SiF6_DMOL3.nxs.md5 b/Testing/Data/SystemTest/Na2SiF6_DMOL3.nxs.md5
index 5d3b5fb9143b009e41e953160528adf0bf94a6b5..17ae498c9f66c478e2779493ba53b437b8cd1b8e 100644
--- a/Testing/Data/SystemTest/Na2SiF6_DMOL3.nxs.md5
+++ b/Testing/Data/SystemTest/Na2SiF6_DMOL3.nxs.md5
@@ -1 +1 @@
-68ea153bd8dac8c5b40a15e65fb6dac1
+23947afbbcec326052018f34551993a3
diff --git a/Testing/Data/SystemTest/TolueneLargerOrderAbins.nxs.md5 b/Testing/Data/SystemTest/TolueneLargerOrderAbins.nxs.md5
index 58f23181f720f18df14ffabf7cc12fcf33028188..6f162e65e2187871e7d09931033ba4c62dc8501c 100644
--- a/Testing/Data/SystemTest/TolueneLargerOrderAbins.nxs.md5
+++ b/Testing/Data/SystemTest/TolueneLargerOrderAbins.nxs.md5
@@ -1 +1 @@
-62ea5e1a6155f84b479a1947e9c38622
+7031da500c989b8ee3b7e884ab12426c
diff --git a/Testing/Data/SystemTest/TolueneScale.nxs.md5 b/Testing/Data/SystemTest/TolueneScale.nxs.md5
index 34ac1914c0ea7499cc7f68d8bffecddc8332c7da..9e1652d98002743d739737cb348e7c1e0fb74546 100644
--- a/Testing/Data/SystemTest/TolueneScale.nxs.md5
+++ b/Testing/Data/SystemTest/TolueneScale.nxs.md5
@@ -1 +1 @@
-e481f1052eb404a39bd7bb2197d2e6cd
+d1a0e156844c6459d230763742e21080
diff --git a/Testing/Data/SystemTest/TolueneScratchAbins.nxs.md5 b/Testing/Data/SystemTest/TolueneScratchAbins.nxs.md5
index 05c4de8bd3ab955f92764092c50114fa6830482a..ffd48113c4d5f8b80826ad666eca4a60f2c17b97 100644
--- a/Testing/Data/SystemTest/TolueneScratchAbins.nxs.md5
+++ b/Testing/Data/SystemTest/TolueneScratchAbins.nxs.md5
@@ -1 +1 @@
-57009dcbb2f5ec83f760d6f732a27cbf
+ab23cd079e36f46dd610dc22a9d8beac
diff --git a/Testing/Data/SystemTest/TolueneSmallerOrderAbins.nxs.md5 b/Testing/Data/SystemTest/TolueneSmallerOrderAbins.nxs.md5
index 042253d6bbfa168d994072eae2f122d21b74a5b9..7306501d648bc3762f2d8b4404b12a6234137571 100644
--- a/Testing/Data/SystemTest/TolueneSmallerOrderAbins.nxs.md5
+++ b/Testing/Data/SystemTest/TolueneSmallerOrderAbins.nxs.md5
@@ -1 +1 @@
-52247e506e1d99794a14561353791670
+6864e54cc20afed4303eb4d977137913
diff --git a/Testing/Data/SystemTest/TolueneTAbins.nxs.md5 b/Testing/Data/SystemTest/TolueneTAbins.nxs.md5
index da81bd3f49f6d852250a2aa8a928f048d41ac19a..c73aee7c27bcb71fa3f32305aaba2f0102a37f93 100644
--- a/Testing/Data/SystemTest/TolueneTAbins.nxs.md5
+++ b/Testing/Data/SystemTest/TolueneTAbins.nxs.md5
@@ -1 +1 @@
-11926b88b484b4731a0f95f5dfcf21af
+3423d4b0e7c111a829cfac95c81fc350
diff --git a/Testing/Data/UnitTest/BENZENE4_g03_win_LoadGAUSSIAN_data.txt.md5 b/Testing/Data/UnitTest/BENZENE4_g03_win_LoadGAUSSIAN_data.txt.md5
index f6f21da6690188bcaf915fdfd7400973c00fd46f..65ef5b3b2992325a9a07748d7a4e58aa7527f8d7 100644
--- a/Testing/Data/UnitTest/BENZENE4_g03_win_LoadGAUSSIAN_data.txt.md5
+++ b/Testing/Data/UnitTest/BENZENE4_g03_win_LoadGAUSSIAN_data.txt.md5
@@ -1 +1 @@
-68fd5ac687188f790920da484024ed16
+4696883eeecf062fba4577ed3b0f259e
diff --git a/Testing/Data/UnitTest/BENZENE4_g09_win_LoadGAUSSIAN_data.txt.md5 b/Testing/Data/UnitTest/BENZENE4_g09_win_LoadGAUSSIAN_data.txt.md5
index ee35b1de2abae1b57fe872b2fbc5574b0b8cab78..2ed1922544dfa555395122131884393e0ecda9dd 100644
--- a/Testing/Data/UnitTest/BENZENE4_g09_win_LoadGAUSSIAN_data.txt.md5
+++ b/Testing/Data/UnitTest/BENZENE4_g09_win_LoadGAUSSIAN_data.txt.md5
@@ -1 +1 @@
-d1e8191bb1a000b9634a7bcbab879c88
+ce0d7f8e7c31c1f94502a16f6c3f48c0
diff --git a/Testing/Data/UnitTest/C6H5Cl_LoadGAUSSIAN_data.txt.md5 b/Testing/Data/UnitTest/C6H5Cl_LoadGAUSSIAN_data.txt.md5
index ad631f27d09ce078a8ccdb1f1a4686c7188ed758..d9ee6881ff5abc1a925d2262fc7e7c733cac83a8 100644
--- a/Testing/Data/UnitTest/C6H5Cl_LoadGAUSSIAN_data.txt.md5
+++ b/Testing/Data/UnitTest/C6H5Cl_LoadGAUSSIAN_data.txt.md5
@@ -1 +1 @@
-5b62b6581b4b5d085b5e2ccdd25dd061
+2f667ee3b78352b19624a52e72af91fa
diff --git a/Testing/Data/UnitTest/LTA_40_O2_LoadDMOL3_data.txt.md5 b/Testing/Data/UnitTest/LTA_40_O2_LoadDMOL3_data.txt.md5
index 0415b17106893a2b29d6735e0c4ebc4ce7cc5e14..a8116ff4a332fa98e717ad74374f7b6d4a487387 100644
--- a/Testing/Data/UnitTest/LTA_40_O2_LoadDMOL3_data.txt.md5
+++ b/Testing/Data/UnitTest/LTA_40_O2_LoadDMOL3_data.txt.md5
@@ -1 +1 @@
-145ff9b579450f4c5046296e879b311d
+affa8e3bf7262f62242c9ab9649ef87b
diff --git a/Testing/Data/UnitTest/LiOH_H2O_7Li_2D2O_LoadCASTEP_data.txt.md5 b/Testing/Data/UnitTest/LiOH_H2O_7Li_2D2O_LoadCASTEP_data.txt.md5
index abd4f5884e1e4c9f1ed6af49ba306d8d710ace69..4e39ccf877dfa892e37c5c621c86d8172c6972fc 100644
--- a/Testing/Data/UnitTest/LiOH_H2O_7Li_2D2O_LoadCASTEP_data.txt.md5
+++ b/Testing/Data/UnitTest/LiOH_H2O_7Li_2D2O_LoadCASTEP_data.txt.md5
@@ -1 +1 @@
-2a07c4dd28ede6333e445bc21ac36ff9
+d48840f93de637c044aa9fbb6f4923ad
diff --git a/Testing/Data/UnitTest/MgO-222-DISP_LoadCRYSTAL_data.txt.md5 b/Testing/Data/UnitTest/MgO-222-DISP_LoadCRYSTAL_data.txt.md5
index f1382112bd76a4162e81a9435f38dbab331931d7..54990f1d710cf5984839a19d3e112c3e09deee02 100644
--- a/Testing/Data/UnitTest/MgO-222-DISP_LoadCRYSTAL_data.txt.md5
+++ b/Testing/Data/UnitTest/MgO-222-DISP_LoadCRYSTAL_data.txt.md5
@@ -1 +1 @@
-9220a905185fcc7ac537decb674a7673
+dead2ba96ea33e418435aa03adbbbad3
diff --git a/Testing/Data/UnitTest/Na2SiF6_LoadDMOL3_data.txt.md5 b/Testing/Data/UnitTest/Na2SiF6_LoadDMOL3_data.txt.md5
index f5339266c484c1e115156b2759725b09ee48902e..44301e4ec34dcfecc17f69436b1f50158eaa0103 100644
--- a/Testing/Data/UnitTest/Na2SiF6_LoadDMOL3_data.txt.md5
+++ b/Testing/Data/UnitTest/Na2SiF6_LoadDMOL3_data.txt.md5
@@ -1 +1 @@
-2694b5f494bf7d2bae55ffe3f80f80fb
+adf7b3c998a8f92eb6d393f88ef42417
diff --git a/Testing/Data/UnitTest/Si2-phonon_LoadCASTEP_data.txt.md5 b/Testing/Data/UnitTest/Si2-phonon_LoadCASTEP_data.txt.md5
index 8d792cbd95567abdd72e68fae9b2bb09fff314fc..517c5f9e6f6ffa00ab2df0212b902028a3f9d77f 100644
--- a/Testing/Data/UnitTest/Si2-phonon_LoadCASTEP_data.txt.md5
+++ b/Testing/Data/UnitTest/Si2-phonon_LoadCASTEP_data.txt.md5
@@ -1 +1 @@
-eeae67cfd917f31ba47a4d54903e413a
+ab1f26d57faf6bde66165223bee03b40
diff --git a/Testing/Data/UnitTest/Si2-sc_CalculateSPowder_S.txt.md5 b/Testing/Data/UnitTest/Si2-sc_CalculateSPowder_S.txt.md5
index 1ee0290220962180efe45738c4e5366cd2a691ff..ddd6182220a422991a9c36bd042b1902a1cdf2c3 100644
--- a/Testing/Data/UnitTest/Si2-sc_CalculateSPowder_S.txt.md5
+++ b/Testing/Data/UnitTest/Si2-sc_CalculateSPowder_S.txt.md5
@@ -1 +1 @@
-4b77bac3f8c1dc54f63bd41ca3075c48
+b902b1a553ca72bb575d703eec4515f0
diff --git a/Testing/Data/UnitTest/Si2-sc_LoadCASTEP_data.txt.md5 b/Testing/Data/UnitTest/Si2-sc_LoadCASTEP_data.txt.md5
index 3d55344ffb90cb0291b5696460903a9082c867ae..f1b5d3039c7711f984784982f67add736f8521ba 100644
--- a/Testing/Data/UnitTest/Si2-sc_LoadCASTEP_data.txt.md5
+++ b/Testing/Data/UnitTest/Si2-sc_LoadCASTEP_data.txt.md5
@@ -1 +1 @@
-9fa52167843fdad8759a534836796771
+4a8f8bb59c8148cad80fc02db83d3083
diff --git a/Testing/Data/UnitTest/crystalB3LYP_LoadCRYSTAL_data.txt.md5 b/Testing/Data/UnitTest/crystalB3LYP_LoadCRYSTAL_data.txt.md5
index 1b92744717bacd36891dbe69574b35c63119dd9c..dbc1fe214aca0f97685574f50dc3078769b0e7aa 100644
--- a/Testing/Data/UnitTest/crystalB3LYP_LoadCRYSTAL_data.txt.md5
+++ b/Testing/Data/UnitTest/crystalB3LYP_LoadCRYSTAL_data.txt.md5
@@ -1 +1 @@
-1a3f3375ab6bc11d5c76d7e4441aabc7
+c5bd42405043b4e856a674f4a09fdfd1
diff --git a/Testing/Data/UnitTest/crystal_set_key_LoadCRYSTAL_data.txt.md5 b/Testing/Data/UnitTest/crystal_set_key_LoadCRYSTAL_data.txt.md5
index 6967e7b0e97e4a3b7460c4df218e225665de6404..83926d55313e18b0d109736806d8880dccb807ea 100644
--- a/Testing/Data/UnitTest/crystal_set_key_LoadCRYSTAL_data.txt.md5
+++ b/Testing/Data/UnitTest/crystal_set_key_LoadCRYSTAL_data.txt.md5
@@ -1 +1 @@
-21038d504c2265f7c7046c68a6fd2ac8
+a450d26388737291ad786f99e254b4a3
diff --git a/Testing/Data/UnitTest/mgo-GX_LoadCRYSTAL_data.txt.md5 b/Testing/Data/UnitTest/mgo-GX_LoadCRYSTAL_data.txt.md5
index 738678029e7c78703f0fd503218b3f820c1dd324..3dfc3fefe5b28179bbd2d1e57a09c839d56b23ea 100644
--- a/Testing/Data/UnitTest/mgo-GX_LoadCRYSTAL_data.txt.md5
+++ b/Testing/Data/UnitTest/mgo-GX_LoadCRYSTAL_data.txt.md5
@@ -1 +1 @@
-65d8d0a26ab8a2ae2fe50e6d7b392fac
+912c101df8f4aa2ebafadac75415040e
diff --git a/Testing/Data/UnitTest/squaricn_no_sum_LoadCASTEP_data.txt.md5 b/Testing/Data/UnitTest/squaricn_no_sum_LoadCASTEP_data.txt.md5
index 1d3350a3bcc37ad24b42865b7a2cf93357d85195..b6233443d8c92718d4acc7a128730bf3185dc14c 100644
--- a/Testing/Data/UnitTest/squaricn_no_sum_LoadCASTEP_data.txt.md5
+++ b/Testing/Data/UnitTest/squaricn_no_sum_LoadCASTEP_data.txt.md5
@@ -1 +1 @@
-9f499eb0c6fbce018cadd20787ff1353
+17e43d18f376c8739b1ef7ea4a864413
diff --git a/Testing/Data/UnitTest/squaricn_sum_LoadCASTEP_data.txt.md5 b/Testing/Data/UnitTest/squaricn_sum_LoadCASTEP_data.txt.md5
index 8c200f91356951ed05bb439ee1bb6526e827abe8..aa2ca9d42479442ae47f951791d893887740802f 100644
--- a/Testing/Data/UnitTest/squaricn_sum_LoadCASTEP_data.txt.md5
+++ b/Testing/Data/UnitTest/squaricn_sum_LoadCASTEP_data.txt.md5
@@ -1 +1 @@
-3748b8a102f7e57c424fd831a3e5748a
+b14c0f98ce5a97d9ccd687463677d9c6
diff --git a/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL17_data.txt.md5 b/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL17_data.txt.md5
index 9c0d4e946b8c531c6dfe16f732e87d75e79bdc40..d8c6577d43d787e1ecca99dd106af3ffbe828882 100644
--- a/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL17_data.txt.md5
+++ b/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL17_data.txt.md5
@@ -1 +1 @@
-dcedfbda8976f92bc8775e1e1aa15362
+4e70ecd1aed776b8e83d78b150898cf3
diff --git a/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL_data.txt.md5 b/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL_data.txt.md5
index 9d1e360365c31b8efcd695e2fa439e853c65579e..401dd474c0eeb6f70f1318d2e4e3f55952df946d 100644
--- a/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL_data.txt.md5
+++ b/Testing/Data/UnitTest/toluene_molecule_LoadCRYSTAL_data.txt.md5
@@ -1 +1 @@
-28b3a9ef50cf1b018aa93527b921bf24
+6ab7231dfe3e856749070a5d0119ebd5
diff --git a/docs/source/algorithms/Abins-v1.rst b/docs/source/algorithms/Abins-v1.rst
index 6608912de5454e5baf2b983a86ad50b36f6c95d5..acc101ee47780c113b4152fb2d2f5827a9f33164 100644
--- a/docs/source/algorithms/Abins-v1.rst
+++ b/docs/source/algorithms/Abins-v1.rst
@@ -45,6 +45,7 @@ More information about the implemented working equations can be found :ref:`here
 
 Abins is in constant development and suggestions for improvements are very welcome. For any such contributions please
 contact Dr. Sanghamitra Mukhopadhyay (sanghamitra.mukhopadhyay@stfc.ac.uk).
+If you are developing or hacking on Abins, see the :ref:`AbinsImplementation` notes which outline some of the key conventions, practices and pitfalls.
 
 If Abins is used as part of your data analysis routines, please cite the relevant reference [1]_.
 
diff --git a/docs/source/concepts/AbinsImplementation.rst b/docs/source/concepts/AbinsImplementation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b183227fa4fef41bc4b65d2c33e9dd0b04e9bee
--- /dev/null
+++ b/docs/source/concepts/AbinsImplementation.rst
@@ -0,0 +1,147 @@
+.. _AbinsImplementation:
+
+Abins: Implementation details
+=============================
+
+.. contents::
+
+
+Introduction
+------------
+
+Abins is a relatively complex algorithm with some unique
+infrastructure and built-in design decisions. Details are collected
+here: this is primarily for the benefit of those developing or
+hacking Abins.
+
+
+Deprecation plans
+-----------------
+
+- The *pkt_per_peak* and *fwhm* parameters in
+  ``AbinsParameters.sampling`` are no longer in use and should be
+  removed in a future release.
+
+- The "SingleCrystal" modules and objects support non-existent
+  functionality and should be removed. They may return when that
+  functionality is added, but it is likely that their interfaces will
+  change in the process.
+
+- The *frequencies_threshold* parameter in
+  ``AbinsParameters.sampling`` is currently non-functional and should
+  be removed until it *is* functional.
+
+
+Sampling
+--------
+
+While the scattering cross-sections are calculated for individual
+events and enumerated combinations of events, the output spectra are
+histograms which have been resampled and broadened on a finite grid.
+
+The data range is determined by three parameters:
+
+- *min_wavenumber*, set in ``AbinsParameters.sampling``
+- *max_wavenumber*, set in ``AbinsParameters.sampling``
+- *bin_width*, a parameter set in the main Abins algorithm interface
+  and passed as an argument to internal functions as appropriate. This
+  parameter is more exposed than the energy range, as a convenience to
+  the user. However, it should be applied consistently within a single
+  application of Abins and functions may assume that this value was
+  also used elsewhere.
+
+These parameters are used to establish the edges of the sample *bins*;
+the largest value is rounded up from *max_wavenumber* to contain a
+whole number of *bin_width*.
+
+The histogram of data sampled in these N bins has N-1 "points", which
+should be aligned with the corresponding bin centres if plotted as a
+line. In the code this array of frequency coordinates is generally
+named *freq_points*.
+
+Broadening
+----------
+
+Instrumental broadening in Abins involves an energy-dependent
+resolution function; in the implemented case (the TOSCA instrument)
+this is convolution with a Gaussian kernel with the energy-dependent
+width parameter (sigma) determined by a quadratic polynomial.
+Convolution with a varying kernel is not a common operation and is
+implemented within AbinsModules.
+
+Earlier versions of Abins implemented a Gaussian kernel with a
+fixed number of points spread over a range scaled to the peak width,
+set by *pkt_per_peak* and *fwhm* parameters in
+``AbinsParameters.sampling``.
+This method leads to aliasing when the x-coordinates are
+incommensurate with the histogram spacing, and an uneven number of
+samples falls into each bin.
+
+.. image:: ../images/gaussian_aliasing.png
+    :align: center
+
+The safest way to avoid such trouble is for all broadening methods to
+output onto the regular *freq_points* grid. A number of broadening
+implementations have been provided in
+``AbinsModules.Instruments.Broadening``. It is up to the Instrument
+logic to dispatch broadening calls to the requested implementation,
+and it is up to specific Instruments to select an appropriate scheme
+for their needs.
+The advanced parameter *AbinsParameters.sampling['broadening_scheme']*
+is made available so that this can be overruled, but it is up to the
+Instrument to interpret this information. 'auto' should select an
+intelligent scheme and inappropriate methods can be forbidden.
+
+A fast, possibly-novel method of frequency-dependent broadening has
+been implemented as the 'interpolate' method. For notes on this method
+and its limitations see :ref:`AbinsInterpolatedBroadening`.
+
+Testing
+-------
+
+Tests for Abins are located in a few places:
+
+Unit tests
+~~~~~~~~~~
+Unit tests for the Python AbinsModules are in *scripts/test/Abins*.
+These are set up with the other unit tests (``cmake --build . --target AllTests``)
+and can be run by filtering for Abins (``ctest -R Abins``).
+This will also run the Algorithm tests (next section).
+
+Some of these tests load input data and check that the
+structure/vibration data has been read correctly. There is a
+consistent framework for this type of test established in
+``AbinsModules.GeneralLoadAbInitioTester`` and inherited by
+code-specific tests e.g. *AbinsLoadGAUSSIANTest*.  To create a new
+reference data file with a specific build of Abins, instantiate a
+Loader and pass it to *save_ab_initio_test_data*. This takes two lines of Python, e.g.:
+
+.. code-block:: python
+
+   reader = AbinsModules.LoadCRYSTAL(input_ab_initio_filename='my_calc.out')
+   AbinsModules.GeneralLoadAbInitioTester.save_ab_initio_test_data(reader, 'my_new_test')
+
+which will write the necessary *my_new_test_data.txt* file and
+*my_new_test_atomic_displacements_data_{k}.txt* files for each phonon wavevector.
+
+Algorithm tests
+~~~~~~~~~~~~~~~
+Tests of the main Abins algorithm and of the advanced parameter
+settings are in
+*Framework/PythonInterface/test/python/plugins/algorithms*. These
+mostly cover input sanity-checking, and check the consistency of
+result scaling and splitting into atomic contributions.
+
+System tests
+~~~~~~~~~~~~
+System tests are defined in *Testing/SystemTests/tests/analysis/AbinsTest.py*.
+These tests compare the output workspaces of Abins runs with reference Nexus files,
+using the standard setup described in
+`the main developer docs <http://developer.mantidproject.org/SystemTests.html>`_.
+The reference data will need to be changed when major updates to Abins
+impact the output results; the simplest way to obtain the new
+reference files is to run the system tests, which will save Nexus
+files from the failed system tests. These should be inspected to
+verify that all changes were expected and understood as consequences
+of changes to Abins.
+
+.. categories:: Concepts
diff --git a/docs/source/concepts/AbinsInterpolatedBroadening.rst b/docs/source/concepts/AbinsInterpolatedBroadening.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ec6e2da241a7b5ad67602b95f01772143f28ca33
--- /dev/null
+++ b/docs/source/concepts/AbinsInterpolatedBroadening.rst
@@ -0,0 +1,367 @@
+.. _AbinsInterpolatedBroadening:
+
+Abins: Fast approximate broadening with "interpolate"
+=====================================================
+
+.. contents::
+
+The "interpolate" scheme in ``AbinsModules.Instruments.Broadening``
+estimates broadened spectra using a limited number of kernel
+evaluations or convolution steps in order to reduce the computation
+time. The method appears to be novel, so some explanation is needed.
+
+Consider first that we can approximate a Gaussian function by a linear
+combination of two other Gaussians; one narrower and one wider. If the
+mixing parameters are linearly based on the relationship between the
+widths the results are not impressive:
+
+.. plot::
+
+   from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+   import numpy as np
+   from scipy.optimize import curve_fit
+   import matplotlib.pyplot as plt
+   from matplotlib.lines import Line2D
+
+   def gaussian(x, sigma=2, center=0):
+       g = np.exp(-0.5 * ((x - center) / sigma)**2) / (sigma * np.sqrt(2 * np.pi))
+       return g
+
+   margin = 0.05
+
+   def plot_linear_interp():
+       """Plot linearly-interpolated Gaussians with wide sigma range"""
+
+       g1_center = 0
+       g2_center = 40
+       sigma_max = 4
+       sigma_min = 1
+
+       x = np.linspace(-10, 50, 401)
+
+       fig, ax = plt.subplots()
+       for sigma, color in zip(np.linspace(sigma_min, sigma_max, 5),
+                               ['C0', 'C1', 'C2', 'C3', 'C4']):
+           center = (g1_center
+                     + ((sigma - sigma_min)
+                        * (g2_center - g1_center) / (sigma_max - sigma_min)))
+           ax.plot(x, gaussian(x, sigma=sigma, center=center), c=color)
+
+           low_ref = gaussian(x, sigma=sigma_min, center=center)
+           high_ref = gaussian(x, sigma=sigma_max, center=center)
+           mix = (sigma - sigma_min) / (sigma_max - sigma_min)
+           ax.plot(x, (1 - mix) * low_ref + mix * high_ref,
+                   c=color, linestyle='--')
+
+       ax.set_xlim([-10, 50])
+       ax.set_ylim([0, None])
+       ax.tick_params(labelbottom=False, labelleft=False)
+
+       custom_lines = [Line2D([0], [0], color='k', linestyle='-', lw=2),
+                       Line2D([0], [0], color='k', linestyle='--', lw=2)]
+
+       ax.legend(custom_lines, ['Exact', 'Linear interpolation'])
+       fig.subplots_adjust(left=margin, bottom=margin,
+                           right=(1 - margin), top=(1 - margin))
+
+   plot_linear_interp()
+
+
+But if we optimise the mixing parameter at each width then the
+magnitudes improve significantly, even if the shapes remain distinctly non-Gaussian:
+
+.. plot::
+
+   from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+   import numpy as np
+   from scipy.optimize import curve_fit
+   import matplotlib.pyplot as plt
+   from matplotlib.lines import Line2D
+
+   def gaussian(x, sigma=2, center=0):
+       g = np.exp(-0.5 * ((x - center) / sigma)**2) / (sigma * np.sqrt(2 * np.pi))
+       return g
+
+   margin = 0.05
+
+   def plot_optimised_interp(sigma_max=4):
+       g1_center = 0
+       g2_center = 40
+       sigma_min = 1
+
+       x = np.linspace(-10, 10, 101)
+       npts = 7
+
+       fig, [ax1, ax2, ax3] = plt.subplots(nrows=3,
+                                           sharex=True,
+                                           gridspec_kw={
+                                               'height_ratios': [3, 1, 1]})
+       mix1_list, mix2_list = [], []
+
+       def gaussian_mix(x, w1, w2):
+           """Return a linear combination of two Gaussians with weights"""
+           return (w1 * gaussian(x, sigma=sigma_min)
+                   + w2 * gaussian(x, sigma=sigma_max))
+
+
+       for sigma, color in zip(np.linspace(sigma_min, sigma_max, npts),
+                               ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']):
+           ydata = gaussian(x, sigma=sigma)
+           (mix1, mix2), _ = curve_fit(gaussian_mix, x, ydata, p0=[0.5, 0.5])
+
+           x_offset = (g1_center
+                       + ((sigma - sigma_min)
+                          * (g2_center - g1_center) / (sigma_max - sigma_min)))
+           actual = gaussian(x, sigma=sigma)
+           est = gaussian_mix(x, mix1, mix2)
+           rms = np.sqrt(np.mean((actual - est)**2))
+           ax1.plot(x + x_offset, actual, color=color)
+           ax1.plot(x + x_offset, est, color=color, linestyle='--')
+           ax2.plot([x_offset], [rms], 'o', c='C0')
+
+           mix1_list.append(mix1)
+           mix2_list.append(mix2)
+
+
+       custom_lines = [Line2D([0], [0], color='k', linestyle='-', lw=2),
+                       Line2D([0], [0], color='k', linestyle='--', lw=2)]
+
+       ax1.legend(custom_lines, ['Exact', 'Optimised interpolation'])
+
+       ax2.set_ylabel('RMS error')
+
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix1_list)
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix2_list)
+       ax3.set_ylabel('Weights')
+       ax3.set_ylim([0, 1])
+
+   plot_optimised_interp(sigma_max=4)
+
+
+This error is closely related to the width difference between the
+endpoints. Here the range is reduced from a factor 4 to a factor 2,
+and the resulting functions are visually quite convincing:
+
+.. plot::
+
+   from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+   import numpy as np
+   from scipy.optimize import curve_fit
+   import matplotlib.pyplot as plt
+   from matplotlib.lines import Line2D
+
+   def gaussian(x, sigma=2, center=0):
+       g = np.exp(-0.5 * ((x - center) / sigma)**2) / (sigma * np.sqrt(2 * np.pi))
+       return g
+
+   margin = 0.05
+
+   def plot_optimised_interp(sigma_max=4):
+       g1_center = 0
+       g2_center = 40
+       sigma_min = 1
+
+       x = np.linspace(-10, 10, 101)
+       npts = 7
+
+       fig, [ax1, ax2, ax3] = plt.subplots(nrows=3,
+                                           sharex=True,
+                                           gridspec_kw={
+                                               'height_ratios': [3, 1, 1]})
+       mix1_list, mix2_list = [], []
+
+       def gaussian_mix(x, w1, w2):
+           """Return a linear combination of two Gaussians with weights"""
+           return (w1 * gaussian(x, sigma=sigma_min)
+                   + w2 * gaussian(x, sigma=sigma_max))
+
+
+       for sigma, color in zip(np.linspace(sigma_min, sigma_max, npts),
+                               ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']):
+           ydata = gaussian(x, sigma=sigma)
+           (mix1, mix2), _ = curve_fit(gaussian_mix, x, ydata, p0=[0.5, 0.5])
+
+           x_offset = (g1_center
+                       + ((sigma - sigma_min)
+                          * (g2_center - g1_center) / (sigma_max - sigma_min)))
+           actual = gaussian(x, sigma=sigma)
+           est = gaussian_mix(x, mix1, mix2)
+           rms = np.sqrt(np.mean((actual - est)**2))
+           ax1.plot(x + x_offset, actual, color=color)
+           ax1.plot(x + x_offset, est, color=color, linestyle='--')
+           ax2.plot([x_offset], [rms], 'o', c='C0')
+
+           mix1_list.append(mix1)
+           mix2_list.append(mix2)
+
+
+       custom_lines = [Line2D([0], [0], color='k', linestyle='-', lw=2),
+                       Line2D([0], [0], color='k', linestyle='--', lw=2)]
+
+       ax1.legend(custom_lines, ['Exact', 'Optimised interpolation'])
+
+       ax2.set_ylabel('RMS error')
+
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix1_list)
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix2_list)
+       ax3.set_ylabel('Weights')
+       ax3.set_ylim([0, 1])
+
+   plot_optimised_interp(sigma_max=2)
+
+while a gap of :math:`\sqrt{2}` is practically indistinguishable with error below 1% of the peak maximum.
+
+.. plot::
+
+   from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+   import numpy as np
+   from scipy.optimize import curve_fit
+   import matplotlib.pyplot as plt
+   from matplotlib.lines import Line2D
+
+   def gaussian(x, sigma=2, center=0):
+       g = np.exp(-0.5 * ((x - center) / sigma)**2) / (sigma * np.sqrt(2 * np.pi))
+       return g
+
+   margin = 0.05
+
+   def plot_optimised_interp(sigma_max=4):
+       g1_center = 0
+       g2_center = 40
+       sigma_min = 1
+
+       x = np.linspace(-10, 10, 101)
+       npts = 7
+
+       fig, [ax1, ax2, ax3] = plt.subplots(nrows=3,
+                                           sharex=True,
+                                           gridspec_kw={
+                                               'height_ratios': [3, 1, 1]})
+       mix1_list, mix2_list = [], []
+
+       def gaussian_mix(x, w1, w2):
+           """Return a linear combination of two Gaussians with weights"""
+           return (w1 * gaussian(x, sigma=sigma_min)
+                   + w2 * gaussian(x, sigma=sigma_max))
+
+
+       for sigma, color in zip(np.linspace(sigma_min, sigma_max, npts),
+                               ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']):
+           ydata = gaussian(x, sigma=sigma)
+           (mix1, mix2), _ = curve_fit(gaussian_mix, x, ydata, p0=[0.5, 0.5])
+
+           x_offset = (g1_center
+                       + ((sigma - sigma_min)
+                          * (g2_center - g1_center) / (sigma_max - sigma_min)))
+           actual = gaussian(x, sigma=sigma)
+           est = gaussian_mix(x, mix1, mix2)
+           rms = np.sqrt(np.mean((actual - est)**2))
+           ax1.plot(x + x_offset, actual, color=color)
+           ax1.plot(x + x_offset, est, color=color, linestyle='--')
+           ax2.plot([x_offset], [rms], 'o', c='C0')
+
+           mix1_list.append(mix1)
+           mix2_list.append(mix2)
+
+
+       custom_lines = [Line2D([0], [0], color='k', linestyle='-', lw=2),
+                       Line2D([0], [0], color='k', linestyle='--', lw=2)]
+
+       ax1.legend(custom_lines, ['Exact', 'Optimised interpolation'])
+
+       ax2.set_ylabel('RMS error')
+
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix1_list)
+       ax3.plot(np.linspace(g1_center, g2_center, npts), mix2_list)
+       ax3.set_ylabel('Weights')
+       ax3.set_ylim([0, 1])
+
+   plot_optimised_interp(sigma_max=np.sqrt(2))
+
+For TOSCA :math:`\sigma = a f^2 + b f + c` where :math:`a, b, c = 10^{-7}, 0.005, 2.5`. For an energy range of 32 cm\ :sup:`-1` to 4100 cm\ :sup:`-1` sigma ranges from 2.66 to 24.68, which could be covered by 5 Gaussians separated by width factor 2 or 9 Gaussians separated by width factor :math:`\sqrt{2}`.
+This could present a significant cost saving compared to full evaluation of ~4000 convolution kernels (one per convolution bin).
+
+We can build on this by performing convolution of the full spectrum with each of the sampled kernels, and then interpolate *between the spectra* using the predetermined mixing weights. The convolution is performed efficiently using FFTs, and relatively little memory is required to hold this limited number of spectra and interpolate between them.
+
+.. plot::
+
+   from __future__ import (absolute_import, division, print_function, unicode_literals)
+
+   import matplotlib.pyplot as plt
+   import numpy as np
+   from AbinsModules.Instruments import Broadening
+
+   bins = np.linspace(0, 100, 1001, dtype=np.float64)
+   frequencies = (bins[:-1] + bins [1:]) / 2
+
+   # Generate synthetic data with two peaks
+   intensities = np.zeros_like(frequencies)
+   peak1_loc = 300
+   peak2_loc = 600
+   intensities[peak1_loc] = 1.5
+   intensities[peak2_loc] = 1
+
+   sigma = np.linspace(1, 10, 1000)
+   peak1_sigma = sigma[peak1_loc]
+   peak2_sigma = sigma[peak2_loc]
+
+   fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True, figsize=(8,6))
+
+   # Original spectrum
+   ax1.plot(frequencies, intensities, 'k-', label='Unbroadened spectrum')
+
+   # Narrow limit
+   freq_points, spectrum = Broadening.broaden_spectrum(
+       frequencies, bins, intensities,
+       (peak1_sigma * np.ones_like(frequencies)),
+       scheme='gaussian')
+   ax2.plot(freq_points, spectrum, label='Convolve with min(sigma)')
+
+   # Broad limit
+   freq_points, spectrum = Broadening.broaden_spectrum(
+       frequencies, bins, intensities,
+       (peak2_sigma * np.ones_like(frequencies)),
+       scheme='gaussian')
+   ax2.plot(freq_points, spectrum, label='Convolve with max(sigma)')
+
+   # Reference method: sum individually
+   freq_points, spectrum = Broadening.broaden_spectrum(
+       frequencies, bins, intensities, sigma, scheme='gaussian')
+   ax3.plot(freq_points, spectrum, 'k-', label='Sum individual peaks')
+
+   # Interpolated
+   freq_points, spectrum = Broadening.broaden_spectrum(
+       frequencies, bins, intensities, sigma, scheme='interpolate')
+   ax2.plot(freq_points, spectrum, c='C2', linestyle='--', label='Interpolated', zorder=0.5)
+   ax3.plot(freq_points, spectrum, c='C2', linestyle='--', label='Interpolated', zorder=0.5)
+
+   ax1.legend()
+   ax2.legend()
+   ax3.legend()
+
+   for ax in ax1, ax2, ax3:
+       ax.tick_params(labelbottom=False, labelleft=False)
+
+   margin=0.05
+   fig.subplots_adjust(left=margin, right=(1-margin), bottom=margin, top=(1-margin))
+
+   fig.savefig('abins_interp_broadening_schematic.png')
+
+This procedure is not strictly equivalent to a summation over frequency-dependent functions, even if there is no interpolation error.
+At each energy coordinate :math:`\epsilon` we "see" a fragment of full spectrum convolved at the same width as any points at :math:`\epsilon` would be.
+In a typical indirect INS spectrum which becomes broader at high energy, this would overestimate the contribution from peaks originating below this :math:`\epsilon` and underestimate the contribution from peaks originating above :math:`\epsilon`.
+As a result, peaks will appear asymmetric.
+In practice, the magnitude of this error depends on the rate of change of :math:`\sigma` relative to the size of :math:`\sigma`.
+In the case of the TOSCA parameters, the error is very small. This should be re-evaluated for other instruments with different energy-dependent broadening functions.
+
+.. image:: ../images/abins-interpolation-benzene.png
+
+We can see the artefacts of this approach more clearly if we use fewer Gaussians (spaced by factor 2) and zoom in on the spectrum. The interpolation method has a tendency to show small peaks at turning points; this may be related to the imperfection in the shape of the smooth bell.
+
+.. image:: ../images/abins-interpolation-zoom.png
+
+.. categories:: Concepts
diff --git a/docs/source/images/abins-interpolation-benzene.png b/docs/source/images/abins-interpolation-benzene.png
new file mode 100644
index 0000000000000000000000000000000000000000..48a15807d22a304e4d26d948e91080701e7ae0e1
Binary files /dev/null and b/docs/source/images/abins-interpolation-benzene.png differ
diff --git a/docs/source/images/abins-interpolation-zoom.png b/docs/source/images/abins-interpolation-zoom.png
new file mode 100644
index 0000000000000000000000000000000000000000..77cd2a6ec83035b76046669326bc62a2deb5d13e
Binary files /dev/null and b/docs/source/images/abins-interpolation-zoom.png differ
diff --git a/docs/source/images/gaussian_aliasing.png b/docs/source/images/gaussian_aliasing.png
new file mode 100644
index 0000000000000000000000000000000000000000..736452ea438ceb43bf5e2621b470450408f77906
Binary files /dev/null and b/docs/source/images/gaussian_aliasing.png differ
diff --git a/docs/source/release/v4.3.0/indirect_geometry.rst b/docs/source/release/v4.3.0/indirect_geometry.rst
index a452189d03aa202c8f3841b411727de841efafa4..a9998661616ba5dab15faacbff44750c74e11cb4 100644
--- a/docs/source/release/v4.3.0/indirect_geometry.rst
+++ b/docs/source/release/v4.3.0/indirect_geometry.rst
@@ -10,6 +10,21 @@ Indirect Geometry Changes
     improvements, followed by bug fixes.
 
 
+Improvements
+############
+
+- In Abins the instrumental broadening implementation has been overhauled.
+
+  - Output spectra from Abins should now be smooth and free of artefacts.
+  - A fast approximate scheme is implemented and automatically applied
+    when appropriate for simulations of TOSCA spectra. Alternative
+    broadening implementations may be selected through the
+    AbinsParameters system, including slow reference methods.
+  - Spectra now follow the broader Mantid convention for histograms:
+    values correspond to frequencies at the mid-points of the
+    histogram bins. (Previously the values would correspond to one
+    bin-edge.)
+
 BugFixes
 ########
 
diff --git a/scripts/AbinsModules/AbinsData.py b/scripts/AbinsModules/AbinsData.py
index 5a3752bf8afab91c43e2151eff726d057df8fbe4..46b6f414031655eb90ea895682d6245b6b9cb069 100644
--- a/scripts/AbinsModules/AbinsData.py
+++ b/scripts/AbinsModules/AbinsData.py
@@ -19,6 +19,27 @@ class AbinsData(AbinsModules.GeneralData):
         self._kpoints_data = None
         self._data = None
 
+    @staticmethod
+    def from_calculation_data(filename, ab_initio_program):
+        """
+        Get AbinsData from ab initio calculation output file.
+
+        :param filename: Path to vibration/phonon data file
+        :type filename: str
+        :param ab_initio_program: Program which generated data file; this should be a key in AbinsData.ab_initio_loaders
+        :type ab_initio_program: str
+        """
+        # This should live closer to the Loaders but for now it is the only place the dict is used.
+        ab_initio_loaders = {"CASTEP": AbinsModules.LoadCASTEP, "CRYSTAL": AbinsModules.LoadCRYSTAL,
+                             "DMOL3": AbinsModules.LoadDMOL3, "GAUSSIAN": AbinsModules.LoadGAUSSIAN}
+
+        if ab_initio_program.upper() not in ab_initio_loaders:
+            raise ValueError("No loader available for {}: unknown program. "
+                             "supported loaders: {}".format(ab_initio_program.upper(),
+                                                            ' '.join(ab_initio_loaders.keys())))
+        loader = ab_initio_loaders[ab_initio_program.upper()](input_ab_initio_filename=filename)
+        return loader.get_formatted_data()
+
     def set(self, k_points_data=None, atoms_data=None):
         """
 
@@ -39,10 +60,10 @@ class AbinsData(AbinsModules.GeneralData):
         self._data = {"k_points_data": k_points_data.extract(), "atoms_data": atoms_data.extract()}
 
     def get_kpoints_data(self):
-            return self._kpoints_data
+        return self._kpoints_data
 
     def get_atoms_data(self):
-            return self._atoms_data
+        return self._atoms_data
 
     def extract(self):
         for k in self._data["k_points_data"]["atomic_displacements"]:
diff --git a/scripts/AbinsModules/AbinsParameters.py b/scripts/AbinsModules/AbinsParameters.py
index 8cdc801d9e26b2e977df0a789ef8e62693bb628e..1a14741c343b7eda52986d3dd434d9845ab1b7d1 100644
--- a/scripts/AbinsModules/AbinsParameters.py
+++ b/scripts/AbinsModules/AbinsParameters.py
@@ -45,12 +45,12 @@ hdf_groups = {
 
 # Parameters related to sampling density and numerical cutoffs ##########################
 sampling = {
-    'pkt_per_peak': 50,  # number of points for each peak broadened by the experimental resolution
     'max_wavenumber': 4100.0,  # maximum wavenumber in cm^-1 taken into account while creating workspaces (exclusive)
     'min_wavenumber': 0.0,  # minimal wavenumber in cm^-1 taken into account while creating workspaces (exclusive)
     'frequencies_threshold': 0.0,  # minimum included frequency
     's_relative_threshold': 0.01,  # low cutoff for S intensity (fraction of maximum S)
-    's_absolute_threshold': 10e-8  # low cutoff for S intentisty (absolute value)
+    's_absolute_threshold': 10e-8,  # low cutoff for S intensity (absolute value)
+    'broadening_scheme': 'auto',
     }
 
 # Parameters related to performance optimisation that do NOT impact calculation results
diff --git a/scripts/AbinsModules/CalculateS.py b/scripts/AbinsModules/CalculateS.py
index b8564a73db91855a180da767659df9e032dfb936..5fa526ea6b604d840225fb2780175dd8f76c7f16 100644
--- a/scripts/AbinsModules/CalculateS.py
+++ b/scripts/AbinsModules/CalculateS.py
@@ -5,6 +5,7 @@
 #     & Institut Laue - Langevin
 # SPDX - License - Identifier: GPL - 3.0 +
 from __future__ import (absolute_import, division, print_function)
+import numpy as np
 import AbinsModules
 
 
@@ -42,3 +43,32 @@ class CalculateS(object):
                 raise ValueError("Only implementation for sample in the form of powder is available.")
         else:
             raise ValueError("Invalid sample form %s" % sample_form)
+
+    @classmethod
+    def write_test_data(cls, out_file, **kwargs):
+        """
+        Write test data e.g. for CalculateSPowder test as a simple text dump
+
+        The format is liable to change as part of a maintenance clean-up, so is not recommended for use in user scripts.
+
+        :param out_file: Path to output test data file
+        :type out_file: str
+
+        This method will pass all other options to :obj:`CalculateS.init()` (filename, temperature etc.)
+        If you need to generate abins_data, the most convenient way of doing this is with
+        AbinsData.from_calculation_data(filename, ab_initio_program)
+        """
+
+        data = cls.init(**kwargs).get_formatted_data()
+
+        # This function returns a _copy_ of the options so we can use it as a backup
+        printoptions = np.get_printoptions()
+
+        try:
+            np.set_printoptions(threshold=np.nan)
+            with open(out_file, 'w') as f:
+                f.write(str(data.extract()))
+        # We probably don't want full-sized array printing if something goes wrong,
+        # so use `finally` to ensure np verbosity is reset
+        finally:
+            np.set_printoptions(**printoptions)
diff --git a/scripts/AbinsModules/GeneralLoadAbInitioTester.py b/scripts/AbinsModules/GeneralLoadAbInitioTester.py
index 08688c7f809dff764e6c5adb0124e17c5e4d6fc2..f26ce6235ef114c49288fcc71050b6cf01d3adb3 100644
--- a/scripts/AbinsModules/GeneralLoadAbInitioTester.py
+++ b/scripts/AbinsModules/GeneralLoadAbInitioTester.py
@@ -73,6 +73,7 @@ class GeneralLoadAbInitioTester(object):
         self.assertEqual(correct_data["attributes"]["ab_initio_program"], data["attributes"]["ab_initio_program"])
         # advanced_parameters is stored as a str but we unpack/compare to dict
         # so comparison will tolerate unimportant formatting changes
+
         self.assertEqual(correct_data["attributes"]["advanced_parameters"],
                          json.loads(data["attributes"]["advanced_parameters"]))
 
diff --git a/scripts/AbinsModules/Instruments/Broadening.py b/scripts/AbinsModules/Instruments/Broadening.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8a20f01d2f45e6dd9d2352f339055659fa001f7
--- /dev/null
+++ b/scripts/AbinsModules/Instruments/Broadening.py
@@ -0,0 +1,575 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+from __future__ import (absolute_import, division, print_function)
+import numpy as np
+from scipy.special import erf
+from scipy.signal import convolve
+
+prebin_required_schemes = ['interpolate', 'interpolate_coarse']
+
+
+def broaden_spectrum(frequencies, bins, s_dft, sigma, scheme='gaussian_truncated'):
+    """Convert frequency/S data to broadened spectrum on a regular grid
+
+    Several algorithms are implemented, for purposes of
+    performance/accuracy optimisation and demonstration of legacy
+    behaviour.
+
+    :param frequencies: input frequency series; these may be irregularly spaced
+    :type frequencies: 1D array-like
+    :param bins:
+        Evenly-spaced frequency bin values for the output spectrum. If *frequencies* is None, s_dft is assumed to
+        correspond to mid-point frequencies on this mesh.
+    :type bins: 1D array-like
+    :param s_dft: scattering values corresponding to *frequencies*
+    :type s_dft: 1D array-like
+    :param sigma:
+        width of broadening function. This should be a scalar used over the whole spectrum, or a series of values
+        corresponding to *frequencies*.
+    :type sigma: float or 1D array-like
+    :param scheme:
+        Name of broadening method used. Options:
+
+        - none: Return the input data as a histogram, ignoring the value of sigma
+        - gaussian: Evaluate a Gaussian on the output grid for every input point and sum them. Simple but slow, and
+              recommended only for benchmarking and reference calculations.
+        - normal: Generate histograms with appropriately-located normal distributions for every input point. In
+              principle this is more accurate than 'Gaussian' but in practice results are very similar and just as slow.
+        - gaussian_truncated: Gaussians are cut off at a range of 3 * max(sigma). This leads to small discontinuities in
+              the tails of the broadened peaks, but is _much_ faster to evaluate as all the broadening functions are the
+              same (limited) size. Recommended for production calculations.
+        - normal_truncated: As gaussian_truncated, but with normally-distributed histograms replacing Gaussian evaluation.
+              Results are marginally more accurate at a marginally increased cost.
+        - interpolate: A possibly-novel approach in which the spectrum broadened with a frequency-dependent kernel is
+              approximated by interpolation between a small set of spectra broadened with logarithmically-spaced
+              fixed-width kernels. In principle this method introduces an incorrect asymmetry to the peaks. In practice
+              the results are visually acceptable for a slowly-varying instrumental broadening function, with greatly
+              reduced computation time. The default spacing factor of sqrt(2) gives error of ~1%.
+        - interpolate_coarse: The approximate interpolative scheme (above) with a spacing factor of 2, which yields
+              error of ~5%. This is more likely to cause artefacts in the results... but it is very fast. Not recomended
+              for production calculations.
+
+    :type scheme: str
+
+    :returns: (freq_points, broadened_spectrum)
+
+    The *freq_points* are the mid-bin frequency values which
+    nominally correspond to *broadened_spectrum*; both of these are
+    1D arrays of length (N-1) corresponding to the length-N *bins*.
+    """
+    if (bins is None) or (s_dft is None) or (sigma is None):
+        raise ValueError("Frequency bins, S data and broadening width must be provided.")
+
+    bins = np.asarray(bins)
+    freq_points = (bins[1:] + bins[:-1]) / 2
+
+    #  Don't bother broadening if there is nothing here to broaden: return empty spectrum
+    if (len(s_dft) == 0) or not np.any(s_dft):
+        return freq_points, np.zeros_like(freq_points)
+
+    #  Allow histogram data to be input as bins + s_dft; use the mid-bin values
+    elif frequencies is None:
+        if len(s_dft) == len(bins) - 1:
+            frequencies = freq_points
+        else:
+            raise ValueError("Cannot determine frequency values for s_dft before broadening")
+
+    if scheme == 'none':
+        # Null broadening; resample input data and return
+        hist, _ = np.histogram(frequencies, bins=bins, weights=s_dft, density=False)
+        return freq_points, hist
+
+    elif scheme == 'gaussian':
+        # Gaussian broadening on the full grid (no range cutoff, sum over all peaks).
+        # This is not very optimised but a useful reference for "correct" results.
+        bin_width = bins[1] - bins[0]
+        kernels = mesh_gaussian(sigma=sigma[:, np.newaxis],
+                                points=freq_points,
+                                center=frequencies[:, np.newaxis])
+        spectrum = np.dot(kernels.transpose(), s_dft)
+        return freq_points, spectrum
+
+    elif scheme == 'normal':
+        # Normal distribution on the full grid (no range cutoff, sum over all peaks)
+    # In principle this scheme yields slightly more accurate histograms than direct
+        # Gaussian evaluation. In practice the results are very similar but there may
+        # be a performance boost from avoiding the normalisation step.
+        kernels = normal(sigma=sigma[:, np.newaxis],
+                         bins=bins,
+                         center=frequencies[:, np.newaxis])
+        spectrum = np.dot(kernels.transpose(), s_dft)
+        return freq_points, spectrum
+
+    elif scheme == 'gaussian_truncated':
+        # Gaussian broadening on the full grid (sum over all peaks, Gaussian range limited to 3 sigma)
+        # All peaks use the same number of points; if the center is located close to the low- or high-freq limit,
+        # the frequency point set is "justified" to lie inside the range
+        points, spectrum = trunc_and_sum_inplace(function=gaussian,
+                                                 sigma=sigma[:, np.newaxis],
+                                                 points=freq_points,
+                                                 bins=bins,
+                                                 center=frequencies[:, np.newaxis],
+                                                 limit=3,
+                                                 weights=s_dft[:, np.newaxis],
+                                                 method='auto')
+        # Normalize the Gaussian kernel such that sum of points matches input
+        bin_width = bins[1] - bins[0]
+        return points, spectrum * bin_width
+
+    elif scheme == 'normal_truncated':
+        # Normal distribution on the full grid (sum over all peaks, Gaussian range limited to 3 sigma)
+        # All peaks use the same number of points; if the center is located close to the low- or high-freq limit,
+        # the frequency point set is "justified" to lie inside the range
+        return trunc_and_sum_inplace(function=normal,
+                                     function_uses='bins',
+                                     sigma=sigma[:, np.newaxis],
+                                     points=freq_points,
+                                     bins=bins,
+                                     center=frequencies[:, np.newaxis],
+                                     limit=3,
+                                     weights=s_dft[:, np.newaxis],
+                                     method='auto')
+
+    elif scheme == 'interpolate':
+        return interpolated_broadening(sigma=sigma, points=freq_points, bins=bins,
+                                       center=frequencies, weights=s_dft, is_hist=True,
+                                       limit=3, function='gaussian', spacing='sqrt2')
+
+    elif scheme == 'interpolate_coarse':
+        return interpolated_broadening(sigma=sigma, points=freq_points, bins=bins,
+                                       center=frequencies, weights=s_dft, is_hist=True,
+                                       limit=3, function='gaussian', spacing='2')
+
+    else:
+        raise ValueError('Broadening scheme "{}" not supported for this instrument, please correct '
+                         'AbinsParameters.sampling["broadening_scheme"]'.format(scheme))
+
+
+def mesh_gaussian(sigma=None, points=None, center=0):
+    """Evaluate a Gaussian function over a regular (given) mesh
+
+    The Gaussian is normalized by multiplying by the bin-width. This approach will give correct results even when the
+    function extends (or originates) beyond the sampling region; however, it cannot be applied to an irregular mesh.
+    Note that this normalisation gives a consistent _sum_ rather than a consistent _area_; it is a suitable kernel for
+    convolution of a histogram.
+
+    If 'points' contains less than two values, an empty or zero array is returned as the spacing cannot be inferred.
+
+    :param sigma: sigma defines width of Gaussian
+    :param points: points for which Gaussian should be evaluated
+    :type points: 1-D array
+    :param center: center of Gaussian
+    :type center: float or array
+    :returns:
+        numpy array with calculated Gaussian values. Dimensions are broadcast from inputs:
+        to obtain an (M x N) 2D array of gaussians at M centers and
+        corresponding sigma values, evaluated over N points, let *sigma* and
+        *center* be arrays of shape (M, 1) (column vectors) and *points* have
+         shape (N,) or (1, N) (row vector).
+    """
+
+    if len(points) < 2:
+        return(np.zeros_like(points * sigma))
+
+    bin_width = points[1] - points[0]
+    return(gaussian(sigma=sigma, points=points, center=center) * bin_width)
+
+
+def gaussian(sigma=None, points=None, center=0):
+    """Evaluate a Gaussian function over a given mesh
+
+    :param sigma: sigma defines width of Gaussian
+    :param points: points for which Gaussian should be evaluated
+    :type points: 1-D array
+    :param center: center of Gaussian
+    :type center: float or array
+
+    .. note:: The returned curve is normalised to unit area, not unit sum
+        over the sampled points; multiply by the bin width (as in
+        mesh_gaussian) to obtain a kernel suitable for convolution of a
+        histogram. This is unreliable if the function extends beyond the points.
+    :returns:
+        numpy array with calculated Gaussian values. Dimensions are broadcast from inputs:
+        to obtain an (M x N) 2D array of gaussians at M centers and
+        corresponding sigma values, evaluated over N points, let *sigma* and
+        *center* be arrays of shape (M, 1) (column vectors) and *points* have
+         shape (N,) or (1, N) (row vector).
+         """
+    two_sigma = 2.0 * sigma
+    sigma_factor = two_sigma * sigma
+    kernel = np.exp(-(points - center) ** 2 / sigma_factor) / (np.sqrt(sigma_factor * np.pi))
+
+    return kernel
+
+
+def normal(bins=None, sigma=None, center=0):
+    """
+    Broadening function: fill histogram bins with normal distribution
+
+    This should give identical results to evaluating the Gaussian on a very dense grid before resampling to the bins
+    It is implemented using the analytic integral of the Gaussian function and should not require normalisation
+
+    Int [exp(-0.5((x-u)/sigma)^2)]/[sigma sqrt(2 pi)] dx = -0.5 erf([u - x]/[sqrt(2) sigma]) + k
+
+    :param bins: sample bins for function evaluation. This _must_ be evenly-spaced.
+    :type bins: 1-D array
+    :param sigma: width parameter; use a column vector to obtain a 2D array of peaks with different widths
+    :type sigma: float or Nx1 array
+    :param center: x-position of function center; use a column vector to obtain a 2D array of peaks with different centers.
+    :type center: float or Nx1 array
+    """
+
+    integral_at_bin_edges = -0.5 * erf((center - bins) / (np.sqrt(2) * sigma))
+    bin_contents = integral_at_bin_edges[..., 1:] - integral_at_bin_edges[..., :-1]
+
+    return bin_contents
+
+
+def trunc_function(function=None, sigma=None, points=None, center=None, limit=3):
+    """
+    Wrap a simple broadening function and evaluate within a limited range
+
+                                 *
+                                * *
+                                * *
+                               *   *
+                              *     *
+                            *         *
+                        ..: *         * :..
+                     -----------------------
+                            |----|
+                         limit * sigma
+
+    A spectrum is returned corresponding to the entire input set of
+    bins/points, but this is zeroed outside of a restricted range (``limit * sigma``).
+    The purpose of this is performance optimisation compared to evaluating over the whole range.
+    There is an artefact associated with this: a vertical step down to zero at the range cutoff.
+
+    Note that when evaluating over multiple center/sigma rows, each row uses its own cutoff range.
+
+    Note that this relies heavily on the _broadcasting_ capabilities of the
+    functions. They will be called with long 1-D arrays of input data, expected
+    to return 1-D data that is unpacked to the results matrix.
+
+    :param function: broadening function; this should accept named arguments "center", "sigma" and either "bins" or "points".
+    :type function: python function
+    :param sigma: sigma defines width of Gaussian
+    :type sigma: float or Nx1 array
+    :param points: points for which function should be evaluated. Use this option _or_ *bins*.
+    :type points: 1-D array
+    :param center: center of Gaussian
+    :type center: float or Nx1 array
+    :param limit: range (as multiple of sigma) for cutoff
+    :type limit: float
+
+    :returns: numpy array with calculated Gaussian values
+    """
+
+    if isinstance(center, float):
+        center = np.array([[center]])
+    if isinstance(sigma, float):
+        sigma = sigma * np.ones_like(center)
+
+    points_matrix = points * np.ones((len(center), 1))
+
+    center_matrix = center * np.ones(len(points))
+    sigma_matrix = sigma * np.ones(len(points))
+
+    distances = abs(points_matrix - center_matrix)
+    points_close_to_peaks = distances < (limit * sigma_matrix)
+
+    results = np.zeros((len(center), len(points)))
+    results[points_close_to_peaks] = function(sigma=sigma_matrix[points_close_to_peaks],
+                                              points=points_matrix[points_close_to_peaks],
+                                              center=center_matrix[points_close_to_peaks])
+    return results
+
+
+def trunc_and_sum_inplace(function=None, function_uses='points',
+                          sigma=None, points=None, bins=None, center=None, weights=1,
+                          limit=3, method='auto'):
+    """Wrap a simple broadening function, evaluate within a consistent range and sum to spectrum
+
+                      center1                          center2
+                         :                                :
+                         :                                :
+                         *                                :
+                        * *                               :
+                        * *                               **
+                       * |-| sigma1      +               *  *            + ...
+                      *     *                          *   |--| sigma2
+                    *         *                    . *          * .
+                ..: *         * :..             ..:: *          * ::..
+             --------------------------     -----------------------
+                    |----|                           |----|
+                 limit * max(sigma)          limit * max(sigma)
+
+    A spectrum is returned corresponding to the entire input set of
+    bins/points, but this is zeroed outside of a restricted range (``limit * sigma``).
+    The range is the same for all peaks, so max(sigma) is used for all.
+    The purpose of this is performance optimisation compared to evaluating over the whole range.
+    There is an artefact associated with this: a vertical step down to zero at the range cutoff.
+
+    The function will be evaluated with the input points or bins
+    for each set of (center, sigma) and summed to a single
+    spectrum. Two technical approaches are provided: a python for-loop
+    which iterates over the functions and sums/writes to the
+    appropriate regions of the output array, and a np.histogram dump of
+    all the frequency/intensity values. The for-loop approach has a
+    more consistent speed but performance ultimately depends on the array sizes.
+
+    :param function: broadening function; this should accept named arguments "center", "sigma" and either "bins" or "points".
+    :type function: python function
+    :param function_uses: 'points' or 'bins'; select which type of x data is passed to "function"
+    :param sigma: widths of broadening functions (passed to "sigma" argument of function)
+    :type sigma: float or Nx1 array
+    :param bins: sample bins for function evaluation. This _must_ be evenly-spaced.
+    :type bins: 1-D array
+    :param points: regular grid of points for which function should be evaluated.
+    :type points: 1-D array
+    :param center: centers of broadening functions
+    :type center: float or Nx1 array
+    :param weights: weights of peaks for summation
+    :type weights: float or array corresponding to "center"
+    :param limit: range (as multiple of sigma) for cutoff
+    :type limit: float
+    :param method:
+        'auto', 'histogram' or 'forloop'; select between implementations for summation at appropriate
+        frequencies. 'auto' uses 'forloop' when there are > 1500 frequencies
+    :type method: str
+
+    :returns: (points, spectrum)
+    :returntype: (1D array, 1D array)
+
+    """
+    # Select method for summation
+    # Histogram seems to be faster below 1500 points; tested on a macbook pro and a Xeon workstation.
+    # This threshold may change depending on updates to hardware, Python and Numpy...
+    # For now we hard-code the number. It would ideally live in AbinsParameters but is very specific to this function.
+    if method == 'auto' and points.size < 1500:
+        sum_method = 'histogram'
+    elif method == 'auto':
+        sum_method = 'forloop'
+    else:
+        sum_method = method
+
+    bin_width = bins[1] - bins[0]
+    if not np.isclose(points[1] - points[0], bin_width):
+        raise ValueError("Bin spacing and point spacing are not consistent")
+
+    freq_range = limit * max(sigma)
+    nrows = len(center)
+    ncols = int(np.ceil((freq_range * 2) / bin_width))
+    if ncols > len(points):
+        raise ValueError("Kernel is wider than evaluation region; "
+                         "use a smaller cutoff limit or a larger range of points/bins")
+
+    # Work out locations of frequency blocks. As these should all be the same size,
+    # blocks which would exceed array size are "justified" into the array
+    #
+    # Start by calculating ideal start positions (allowed to exceed bounds) and
+    # corresponding frequencies
+    #
+    # s-|-x----      |
+    #   | s---x----  |
+    #   |  s---x---- |
+    #   |     s---x--|-
+    #
+    #
+    start_indices = np.asarray((center - points[0] - freq_range + (bin_width / 2)) // bin_width,
+                               int)
+    # Values exceeding calculation range would have illegally large "initial guess" indices so clip those to final point
+    #   |            | s---x----     ->       |           s|--x----
+    start_indices[start_indices > len(points)] = -1
+
+    start_freqs = points[start_indices]
+
+    # Next identify points which will overshoot left: x lies to left of freq range
+    # s-|-x-:--      |
+    #   | s-:-x----  |
+    #   |  s:--x---- |
+    #   |   : s---x--|-
+    left_justified = center < freq_range
+
+    # "Left-justify" these points by setting start position to low limit
+    #   |sx-------   |
+    #   | s---x----  |
+    #   |  s---x---- |
+    #   |     s---x--|-
+    start_freqs[left_justified] = points[0]
+    start_indices[left_justified] = 0
+
+    # Apply same reasoning to fix regions overshooting upper bound
+    # Note that the center frequencies do not move: only the grid on which they are evaluated
+    #   |sx------:   |           |sx-------   |
+    #   | s---x--:-  |    --->   | s---x----  |
+    #   |  s---x-:-- |           |  s---x---- |
+    #   |     s--:x--|-          |   s-----x--|
+    right_justified = center > (points[-1] - freq_range)
+    start_freqs[right_justified] = points[-1] - (ncols * bin_width)
+    start_indices[right_justified] = len(points) - ncols
+
+    # freq_matrix is not used in (bins, forloop) mode so only generate if needed
+    if (function_uses == 'points') or (function_uses == 'bins' and sum_method == 'histogram'):
+        freq_matrix = start_freqs.reshape(nrows, 1) + np.arange(0, 2 * freq_range, bin_width)
+
+    # Dispatch kernel generation depending on x-coordinate scheme
+    if function_uses == 'points':
+        kernels = function(sigma=sigma,
+                           points=freq_matrix,
+                           center=center)
+    elif function_uses == 'bins':
+        bin_matrix = start_freqs.reshape(nrows, 1) + np.arange(-bin_width / 2, 2 * freq_range + bin_width / 2, bin_width)
+        kernels = function(sigma=sigma,
+                           bins=bin_matrix,
+                           center=center)
+    else:
+        raise ValueError('x-basis "{}" for broadening function is unknown.'.format(function_uses))
+
+    # Sum spectrum using selected method
+    if sum_method == 'histogram':
+        spectrum, bin_edges = np.histogram(np.ravel(freq_matrix),
+                                           bins,
+                                           weights=np.ravel(weights * kernels),
+                                           density=False)
+    elif sum_method == 'forloop':
+        spectrum = np.zeros_like(points)
+        for start, kernel, weight in zip(start_indices.flatten(), kernels, np.asarray(weights).flatten()):
+            scaled_kernel = kernel * weight
+            spectrum[start:start+ncols] += scaled_kernel
+    else:
+        raise ValueError('Summation method "{}" is unknown.'.format(sum_method))
+
+    return points, spectrum
+
+
+def interpolated_broadening(sigma=None, points=None, bins=None,
+                            center=None, weights=1, is_hist=False, limit=3,
+                            function='gaussian', spacing='sqrt2'):
+    """Return a fast estimate of frequency-dependent broadening
+
+    Consider a spectrum of two peaks, in the case where (as in indirect-geometry INS) the peak width
+    increases with frequency.
+
+       |
+       |        |
+       |        |
+    -----------------
+
+    In the traditional scheme we broaden each peak individually and combine:
+
+       *                      |                       *
+       *        |       +     |        *       =      *        *
+      * *       |             |      *   *           * *     *   *
+    -----------------      -----------------       -----------------
+
+    Instead of summing over broadening kernels evaluated at each peak, the approximation obtains
+    a spectrum corresponding to the following scheme:
+
+    - For each sigma value, the entire spectrum is convolved with an
+      appropriate-width broadening function
+    - At each frequency, the final spectrum value is drawn from the spectrum
+      broadened with corresponding sigma.
+
+    Compared to a summation over broadened peaks, this method introduces an
+    asymmetry to the spectrum about each peak.
+
+       *                    *                    *                         *
+       *        *          * *       *          * *       *      -->       **       *
+      * *       *         *   *     * *       *     *   *   *             *  *     *  *
+    ----------------- ,  ----------------- ,  -----------------         -----------------
+
+    This asymmetry should be tolerable as long as the broadening function
+    varies slowly in frequency relative to its width.
+
+    The benefit of this scheme is that we do not need to evaluate the
+    convolution at every sigma value; nearby spectra can be interpolated.
+    Trial-and-error finds that with optimal mixing the error of a Gaussian
+    approximated by mixing a wider and narrower Gaussian is ~ 5% when the sigma
+    range is factor of 2, and ~ 1% when the sigma range is a factor of sqrt(2).
+    A pre-optimised transfer function can be used for a fixed ratio between the
+    reference functions.
+
+    :param sigma: widths of broadening functions (passed to "sigma" argument of function)
+    :type sigma: float or Nx1 array
+    :param bins: sample bins for function evaluation. This _must_ be evenly-spaced.
+    :type bins: 1-D array
+    :param points: regular grid of points for which function should be evaluated.
+    :type points: 1-D array
+    :param center: centers of broadening functions
+    :type center: float or Nx1 array
+    :param weights: weights of peaks for summation
+    :type weights: float or array corresponding to "center"
+    :param is_hist:
+        If "weights" is already a histogram corresponding to evenly-spaced
+        frequencies, set this to True to avoid a redundant binning operation.
+    :type is_hist: bool
+    :param function: broadening function; currently only 'gaussian' is accepted
+    :type function: str
+    :param limit: range (as multiple of sigma) for cutoff
+    :type limit: float
+    :param spacing:
+        Spacing factor between Gaussian samples on log scale. This is not a
+        free parameter as a pre-computed curve is used for interpolation.
+        Allowed values: '2', 'sqrt2', with error ~5% and ~1% respectively.
+    :type spacing: str
+
+    :returns: (points, spectrum)
+    :returntype: (1D array, 1D array)
+
+    """
+
+    mix_functions = {'gaussian': {'2': {'lower': [-0.1873, 1.464, -4.079, 3.803],
+                                        'upper': [0.2638, -1.968, 5.057, -3.353]},
+                                  'sqrt2': {'lower': [-0.6079, 4.101, -9.632, 7.139],
+                                            'upper': [0.7533, -4.882, 10.87, -6.746]}}}
+    log_bases = {'2': 2, 'sqrt2': np.sqrt(2)}
+    log_base = log_bases[spacing]
+
+    # Sample on appropriate log scale: log_b(x) = log(x) / log(b)
+    n_kernels = int(np.ceil(np.log(max(sigma) / min(sigma)) / np.log(log_base)))
+
+    if n_kernels == 1:
+        sigma_samples = np.array([min(sigma)])
+    else:
+        sigma_samples = log_base**np.arange(n_kernels + 1) * min(sigma)
+
+    bin_width = bins[1] - bins[0]
+
+    # Get set of convolved spectra for interpolation
+    if is_hist:
+        hist = weights
+    else:
+        hist, _ = np.histogram(center, bins=bins, weights=weights, density=False)
+    freq_range = limit * max(sigma)
+    kernel_npts_oneside = np.ceil(freq_range / bin_width)
+
+    if function == 'gaussian':
+        kernels = mesh_gaussian(sigma=sigma_samples[:, np.newaxis],
+                                points=np.arange(-kernel_npts_oneside, kernel_npts_oneside + 1, 1) * bin_width,
+                                center=0)
+    else:
+        raise ValueError('"{}" kernel not supported for "interpolate" broadening method.'.format(function))
+
+    spectra = np.array([convolve(hist, kernel, mode='same') for kernel in kernels])
+
+    # Interpolate with parametrised relationship
+    sigma_locations = np.searchsorted(sigma_samples, sigma) # locations in sampled values of points from sigma
+    spectrum = np.zeros_like(points)
+    # Samples with sigma == min(sigma) are a special case: copy directly from spectrum
+    spectrum[sigma_locations==0] = spectra[0, sigma_locations==0]
+
+    for i in range(1, len(sigma_samples)):
+        masked_block = (sigma_locations == i)
+        sigma_factors = sigma[masked_block] / sigma_samples[i - 1]
+        lower_mix = np.polyval(mix_functions[function][spacing]['lower'], sigma_factors)
+        upper_mix = np.polyval(mix_functions[function][spacing]['upper'], sigma_factors)
+
+        spectrum[masked_block] = (lower_mix * spectra[i-1, masked_block]
+                                  + upper_mix * spectra[i, masked_block])
+
+    return points, spectrum
diff --git a/scripts/AbinsModules/Instruments/Instrument.py b/scripts/AbinsModules/Instruments/Instrument.py
index b796460c2dcc719a031d7c3690a4362fdf439927..9f8ef4ccce0bc548174732db008d9aac4d389f19 100644
--- a/scripts/AbinsModules/Instruments/Instrument.py
+++ b/scripts/AbinsModules/Instruments/Instrument.py
@@ -1,12 +1,10 @@
 # Mantid Repository : https://github.com/mantidproject/mantid
 #
-# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
 #     NScD Oak Ridge National Laboratory, European Spallation Source
 #     & Institut Laue - Langevin
 # SPDX - License - Identifier: GPL - 3.0 +
 from __future__ import (absolute_import, division, print_function)
-import AbinsModules
-import numpy as np
 
 
 # noinspection PyPep8Naming
@@ -22,85 +20,25 @@ class Instrument(object):
         :param  input_data: data from which Q2 should be calculated
         :returns:  numpy array with Q2 data
         """
+        raise NotImplementedError()
 
-        return None
-
-    def convolve_with_resolution_function(self, frequencies=None, s_dft=None):
+    def convolve_with_resolution_function(self, frequencies=None, bins=None, s_dft=None, scheme='auto'):
         """
-        Convolves discrete spectrum with the  resolution function for the particular instrument.
+        Convolves discrete spectrum with the resolution function for the particular instrument.
 
         :param frequencies: frequencies for which resolution function should be calculated (frequencies in cm-1)
-        :param s_dft:  discrete S calculated directly from DFT
-
-       """
-        return None
-
-    # should be treated as a protected function
-    def _convolve_with_resolution_function_helper(self, frequencies=None, s_dft=None, sigma=None, pkt_per_peak=None,
-                                                  gaussian=None):
-        """
-        :param frequencies: discrete frequencies in cm^-1
-        :parameter s_dft: discrete values of S
-        :param sigma: resolution function width (or array of widths corresponding to each frequency)
-        :param pkt_per_peak: number of points per peak
-        :param gaussian: gaussian-like function used to broaden peaks
-        :returns: frequencies for which peaks have been broadened, corresponding S
-        """
-        fwhm = AbinsModules.AbinsParameters.instruments['fwhm']
-
-        # freq_num: number of transition energies for the given quantum order event
-        # start[freq_num]
-        start = frequencies - fwhm * sigma
-
-        # stop[freq_num]
-        stop = frequencies + fwhm * sigma
-
-        # step[freq_num]
-        step = (stop - start) / float((pkt_per_peak - 1))
-
-        # matrix_step[freq_num, pkt_per_peak]
-        matrix_step = np.array([step, ] * pkt_per_peak).transpose()
-
-        # matrix_start[freq_num, pkt_per_peak]
-        matrix_start = np.array([start, ] * pkt_per_peak).transpose()
+        :type frequencies: 1D array-like
+        :param bins: Bin edges for output histogram. Most broadening implementations expect this to be regularly-spaced.
+        :type bins: 1D array-like
+        :param s_dft: discrete S calculated directly from DFT
+        :type s_dft: 1D array-like
+        :param scheme: Broadening scheme. Multiple implementations are available in *Instruments.Broadening* that trade-
+            off between speed and accuracy. Not all schemes must (or should?) be implemented for all instruments, but
+            'auto' should select something sensible.
+        :type scheme: str
 
-        # broad_freq[freq_num, pkt_per_peak]
-        broad_freq = (np.arange(0, pkt_per_peak) * matrix_step) + matrix_start
-        broad_freq[..., -1] = stop
-
-        # sigma_matrix[freq_num, pkt_per_peak]
-        sigma_matrix = np.array([sigma, ] * pkt_per_peak).transpose()
-
-        # frequencies_matrix[freq_num, pkt_per_peak]
-        frequencies_matrix = np.array([frequencies, ] * pkt_per_peak).transpose()
-
-        # gaussian_val[freq_num, pkt_per_peak]
-        dirac_val = gaussian(sigma=sigma_matrix, points=broad_freq, center=frequencies_matrix)
-
-        # s_dft_matrix[freq_num, pkt_per_peak]
-        s_dft_matrix = np.array([s_dft, ] * pkt_per_peak).transpose()
-
-        # temp_spectrum[freq_num, pkt_per_peak]
-        temp_spectrum = s_dft_matrix * dirac_val
-
-        points_freq = np.ravel(broad_freq)
-        broadened_spectrum = np.ravel(temp_spectrum)
-
-        return points_freq, broadened_spectrum
-
-    # should be treated as a protected function
-    def _gaussian(self, sigma=None, points=None, center=None):
-        """
-        :param sigma: sigma defines width of Gaussian
-        :param points: points for which Gaussian should be evaluated
-        :param center: center of Gaussian
-        :returns: numpy array with calculated Gaussian values
         """
-        two_sigma = 2.0 * sigma
-        sigma_factor = two_sigma * sigma
-        norm = (AbinsModules.AbinsParameters.sampling['pkt_per_peak']
-                / (AbinsModules.AbinsParameters.instruments['fwhm'] * two_sigma))
-        return np.exp(-(points - center) ** 2 / sigma_factor) / (np.sqrt(sigma_factor * np.pi) * norm)
+        raise NotImplementedError()
 
     def __str__(self):
         return self._name
diff --git a/scripts/AbinsModules/Instruments/ToscaInstrument.py b/scripts/AbinsModules/Instruments/ToscaInstrument.py
index 222a9385408223decb3164b6b5846acc58edaa21..9bc3b9b42a0252012b55022359de94d7c8872a65 100644
--- a/scripts/AbinsModules/Instruments/ToscaInstrument.py
+++ b/scripts/AbinsModules/Instruments/ToscaInstrument.py
@@ -6,7 +6,9 @@
 # SPDX - License - Identifier: GPL - 3.0 +
 from __future__ import (absolute_import, division, print_function)
 
+import numpy as np
 from .Instrument import Instrument
+from .Broadening import broaden_spectrum, prebin_required_schemes
 from AbinsModules import AbinsParameters
 from AbinsModules import AbinsConstants
 from AbinsModules.FrequencyPowderGenerator import FrequencyPowderGenerator
@@ -16,11 +18,14 @@ class ToscaInstrument(Instrument, FrequencyPowderGenerator):
     """
     Class for TOSCA and TOSCA-like instruments.
     """
+    parameters = AbinsParameters.instruments['TOSCA']
+
     def __init__(self, name):
         self._name = name
         super(ToscaInstrument, self).__init__()
 
-    def calculate_q_powder(self, input_data=None):
+    @classmethod
+    def calculate_q_powder(cls, input_data=None):
         """Calculates squared Q vectors for TOSCA and TOSCA-like instruments.
 
         By the cosine law Q^2 = k_f^2 + k_i^2 - 2 k_f k_i cos(theta)
@@ -38,38 +43,68 @@ class ToscaInstrument(Instrument, FrequencyPowderGenerator):
             constrained by conservation of mass/momentum and TOSCA geometry
         """
 
-        tosca_params = AbinsParameters.instruments['TOSCA']
-
-        k2_i = (input_data + tosca_params['final_neutron_energy']) * AbinsConstants.WAVENUMBER_TO_INVERSE_A
-        k2_f = tosca_params['final_neutron_energy'] * AbinsConstants.WAVENUMBER_TO_INVERSE_A
-        result = k2_i + k2_f - 2 * (k2_i * k2_f) ** 0.5 * tosca_params['cos_scattering_angle']
+        k2_i = (input_data + cls.parameters['final_neutron_energy']) * AbinsConstants.WAVENUMBER_TO_INVERSE_A
+        k2_f = cls.parameters['final_neutron_energy'] * AbinsConstants.WAVENUMBER_TO_INVERSE_A
+        result = k2_i + k2_f - 2 * (k2_i * k2_f) ** 0.5 * cls.parameters['cos_scattering_angle']
         return result
 
-    def convolve_with_resolution_function(self, frequencies=None, s_dft=None):
+    @classmethod
+    def get_sigma(cls, frequencies):
+        """Frequency-dependent broadening width from empirical fit"""
+        a = cls.parameters['a']
+        b = cls.parameters['b']
+        c = cls.parameters['c']
+        sigma = a * frequencies ** 2 + b * frequencies + c
+        return sigma
+
+    def convolve_with_resolution_function(self, frequencies=None, bins=None, s_dft=None, scheme='auto', prebin='auto'):
         """
         Convolves discrete DFT spectrum with the  resolution function for the TOSCA instrument (and TOSCA-like).
         :param frequencies:   DFT frequencies for which resolution function should be calculated (frequencies in cm^-1)
+        :param bins: Evenly-spaced frequency bin values for the output spectrum.
+        :type bins: 1D array-like
         :param s_dft:  discrete S calculated directly from DFT
+        :param scheme: Broadening scheme. This is passed to ``Instruments.Broadening.broaden_spectrum()`` unless set to
+            'auto'. If set to 'auto', the scheme will be based on the number of frequency values: 'gaussian_windowed' is
+            used for small numbers of peaks, while richer spectra will use the fast approximate 'interpolate' scheme.
+            To avoid any approximation or truncation, the 'gaussian' and 'normal' schemes are the most accurate, but
+            will run considerably more slowly.
+        :param prebin:
+            Bin the data before convolution. This greatly reduces the workload for large sets of frequencies, but loses
+            a little precision. If set to 'auto', a choice is made based on the relative numbers of frequencies and
+            sampling bins. For 'legacy' broadening this step is disregarded and implemented elsewhere.
+        :type prebin: str or bool
+
+        :returns: (points_freq, broadened_spectrum)
         """
-        if AbinsParameters.sampling['pkt_per_peak'] == 1:
-
-            points_freq = frequencies
-            broadened_spectrum = s_dft
 
+        if scheme == 'auto':
+            if frequencies.size > 50:
+                selected_scheme = 'interpolate'
+            else:
+                selected_scheme = 'gaussian_truncated'
         else:
+            selected_scheme = scheme
+
+        if prebin == 'auto':
+            if bins.size < frequencies.size:
+                prebin = True
+            elif selected_scheme in prebin_required_schemes:
+                prebin = True
+            else:
+                prebin = False
+
+        if prebin is True:
+            s_dft, _ = np.histogram(frequencies, bins=bins, weights=s_dft, density=False)
+            frequencies = (bins[1:] + bins[:-1]) / 2
+        elif prebin is False:
+            if selected_scheme in prebin_required_schemes:
+                raise ValueError('"prebin" should not be set to False when using "{}" broadening scheme'.format(selected_scheme))
+        else:
+            raise ValueError('"prebin" option must be True, False or "auto"')
 
-            # freq_num: number of transition energies for the given quantum order event
-            # sigma[freq_num]
-
-            a = AbinsParameters.instruments['TOSCA']['a']
-            b = AbinsParameters.instruments['TOSCA']['b']
-            c = AbinsParameters.instruments['TOSCA']['c']
-            sigma = a * frequencies ** 2 + b * frequencies + c
+        sigma = self.get_sigma(frequencies)
 
-            pkt_per_peak = AbinsParameters.sampling['pkt_per_peak']
-            points_freq, broadened_spectrum = self._convolve_with_resolution_function_helper(frequencies=frequencies,
-                                                                                             s_dft=s_dft,
-                                                                                             sigma=sigma,
-                                                                                             pkt_per_peak=pkt_per_peak,
-                                                                                             gaussian=self._gaussian)
+        points_freq, broadened_spectrum = broaden_spectrum(frequencies, bins, s_dft,
+                                                           sigma, scheme=selected_scheme)
         return points_freq, broadened_spectrum
diff --git a/scripts/AbinsModules/SData.py b/scripts/AbinsModules/SData.py
index 0ef9fe22b27c0ae173bf4ecb7c7200debcf77c6a..88fe53fa8249eead1f2a3df21ddafc5f849393d8 100644
--- a/scripts/AbinsModules/SData.py
+++ b/scripts/AbinsModules/SData.py
@@ -53,18 +53,17 @@ class SData(AbinsModules.GeneralData):
                         raise ValueError("Numpy array was expected.")
 
             elif "frequencies" == item:
-
                 step = self._bin_width
                 bins = np.arange(start=AbinsModules.AbinsParameters.sampling['min_wavenumber'],
                                  stop=AbinsModules.AbinsParameters.sampling['max_wavenumber'] + step,
                                  step=step,
                                  dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
 
-                if not np.array_equal(items[item], bins[AbinsModules.AbinsConstants.FIRST_BIN_INDEX:-1]):
-                    raise ValueError("Invalid frequencies.")
+                freq_points = bins[:-1] + (step / 2)
+                if not np.array_equal(items[item], freq_points):
+                    raise ValueError("Invalid frequencies, these should correspond to the mid points of the resampled bins.")
 
             else:
-
                 raise ValueError("Invalid keyword " + item)
 
         self._data = items
diff --git a/scripts/AbinsModules/SPowderSemiEmpiricalCalculator.py b/scripts/AbinsModules/SPowderSemiEmpiricalCalculator.py
index 85e5e3b1bd8bbf7e99ef60dee2d7889b1c3a80d1..8194c0731166ce7a5f1f88d7539aa8ed12f9f8eb 100644
--- a/scripts/AbinsModules/SPowderSemiEmpiricalCalculator.py
+++ b/scripts/AbinsModules/SPowderSemiEmpiricalCalculator.py
@@ -107,13 +107,13 @@ class SPowderSemiEmpiricalCalculator(object):
                                  AbinsModules.AbinsConstants.QUANTUM_ORDER_THREE: self._calculate_order_three,
                                  AbinsModules.AbinsConstants.QUANTUM_ORDER_FOUR: self._calculate_order_four}
 
-        step = bin_width
-        self._bin_width = bin_width
-        start = AbinsParameters.sampling['min_wavenumber'] + step
-        stop = AbinsParameters.sampling['max_wavenumber'] + step
-        self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
+        self._bin_width = bin_width  # This is only here to store in s_data. Is that necessary/useful?
+        self._bins = np.arange(start=AbinsParameters.sampling['min_wavenumber'],
+                               stop=AbinsParameters.sampling['max_wavenumber'] + bin_width,
+                               step=bin_width,
+                               dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
+        self._frequencies = self._bins[:-1] + (bin_width / 2)
         self._freq_size = self._bins.size - 1
-        self._frequencies = self._bins[:-1]
 
         # set initial threshold for s for each atom
         self._num_atoms = len(self._abins_data.get_atoms_data().extract())
@@ -500,13 +500,11 @@ class SPowderSemiEmpiricalCalculator(object):
                                                      b_tensor=self._b_tensors[atom],
                                                      b_trace=self._b_traces[atom])
 
-            rebined_freq, rebined_spectrum = self._rebin_data_opt(array_x=local_freq, array_y=value_dft)
-
-            freq, broad_spectrum = self._instrument.convolve_with_resolution_function(frequencies=rebined_freq,
-                                                                                      s_dft=rebined_spectrum)
-
-            rebined_broad_spectrum = self._rebin_data_full(array_x=freq, array_y=broad_spectrum)
-            rebined_broad_spectrum = self._fix_empty_array(array_y=rebined_broad_spectrum)
+            broadening_scheme = AbinsParameters.sampling['broadening_scheme']
+            _, rebinned_broad_spectrum = self._instrument.convolve_with_resolution_function(frequencies=local_freq,
+                                                                                            bins=self._bins,
+                                                                                            s_dft=value_dft,
+                                                                                            scheme=broadening_scheme)
 
             local_freq, local_coeff = self._calculate_s_over_threshold(s=value_dft,
                                                                        freq=local_freq,
@@ -515,13 +513,13 @@ class SPowderSemiEmpiricalCalculator(object):
                                                                        order=order)
 
         else:
-            rebined_broad_spectrum = self._fix_empty_array()
+            rebinned_broad_spectrum = np.zeros_like(self._frequencies)
 
         # multiply by k-point weight and scaling constant
         # factor = self._weight / self._bin_width
         factor = self._weight
-        rebined_broad_spectrum = rebined_broad_spectrum * factor
-        return local_freq, local_coeff, rebined_broad_spectrum
+        rebinned_broad_spectrum = rebinned_broad_spectrum * factor
+        return local_freq, local_coeff, rebinned_broad_spectrum
 
     # noinspection PyUnusedLocal
     def _calculate_order_one(self, q2=None, frequencies=None, indices=None, a_tensor=None, a_trace=None,
@@ -644,10 +642,10 @@ class SPowderSemiEmpiricalCalculator(object):
 
     def _rebin_data_full(self, array_x=None, array_y=None):
         """
-        Rebins S data so that all quantum events have the same x-axis. The size of rebined data is equal to _bins.size.
+        Rebins S data so that all quantum events have the same x-axis. The size of rebinned data is equal to _bins.size.
         :param array_x: numpy array with frequencies
         :param array_y: numpy array with S
-        :returns: rebined frequencies
+        :returns: rebinned frequencies
         """
         indices = array_x != self._bins[-1]
         array_x = array_x[indices]
@@ -655,37 +653,6 @@ class SPowderSemiEmpiricalCalculator(object):
         maximum_index = min(len(array_x), len(array_y))
         return np.histogram(array_x[:maximum_index], bins=self._bins, weights=array_y[:maximum_index])[0]
 
-    def _rebin_data_opt(self, array_x=None, array_y=None):
-        """
-        Rebins S data in optimised way: the size of rebined data may be smaller then _bins.size.
-        :param array_x: numpy array with frequencies
-        :param array_y: numpy array with S
-        :returns: rebined frequencies, rebined S
-        """
-        if self._bins.size > array_x.size:
-            return array_x, array_y
-        else:
-            output_array_y = self._rebin_data_full(array_x, array_y)
-            return self._frequencies, output_array_y
-
-    def _fix_empty_array(self, array_y=None):
-        """
-        Fixes empty numpy arrays which occur in case of heavier atoms.
-        :returns: numpy array filled with zeros of dimension _bins.size - AbinsConstants.FIRST_BIN_INDEX
-        """
-        if array_y is None:
-            # number of frequencies = self._bins.size - AbinsConstants.FIRST_BIN_INDEX
-            output_y = np.zeros(shape=self._bins.size - AbinsModules.AbinsConstants.FIRST_BIN_INDEX,
-                                dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
-        elif array_y.any():
-            output_y = self._rebin_data_full(array_x=self._bins[AbinsModules.AbinsConstants.FIRST_BIN_INDEX:],
-                                             array_y=array_y)
-        else:
-            output_y = np.zeros(shape=self._bins.size - AbinsModules.AbinsConstants.FIRST_BIN_INDEX,
-                                dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
-
-        return output_y
-
     def calculate_data(self):
         """
         Calculates dynamical structure factor S.
diff --git a/scripts/test/AbinsAtomsDataTest.py b/scripts/test/Abins/AbinsAtomsDataTest.py
similarity index 100%
rename from scripts/test/AbinsAtomsDataTest.py
rename to scripts/test/Abins/AbinsAtomsDataTest.py
diff --git a/scripts/test/Abins/AbinsBroadeningTest.py b/scripts/test/Abins/AbinsBroadeningTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..c92f9e73d3f1f9acd7a1845ebe5db91cff5cfa01
--- /dev/null
+++ b/scripts/test/Abins/AbinsBroadeningTest.py
@@ -0,0 +1,165 @@
+from __future__ import (absolute_import, division, print_function)
+
+import unittest
+import numpy as np
+from numpy.testing import assert_array_almost_equal
+from scipy.stats import norm as spnorm
+
+from AbinsModules.Instruments import Broadening
+
+class AbinsBroadeningTest(unittest.TestCase):
+    """
+    Test Abins broadening functions
+    """
+
+    def test_gaussian(self):
+        """Benchmark Gaussian against (slower) Scipy norm.pdf"""
+        x = np.linspace(-10, 10, 101)
+        diff = np.abs(spnorm.pdf(x) - Broadening.gaussian(sigma=1, points=x, center=0))
+
+        self.assertLess(max(diff), 1e-8)
+
+        sigma, offset = 1.5, 4
+        diff = np.abs(spnorm.pdf((x - offset) / sigma) / (sigma)
+                      - Broadening.gaussian(sigma=sigma, points=x, center=offset))
+        self.assertLess(max(diff), 1e-8)
+
+    def test_mesh_gaussian_value(self):
+        """Check reference values and empty cases for mesh_gaussian"""
+        # Numerical values were not checked against an external reference
+        # so they are only useful for detecting if the results have _changed_.
+
+        self.assertEqual(Broadening.mesh_gaussian(sigma=5, points=[], center=1).shape,
+                         (0,))
+
+        zero_result = Broadening.mesh_gaussian(sigma=np.array([[5]]),
+                                               points=np.array([0,]),
+                                               center=np.array([[3]]))
+        self.assertEqual(zero_result.shape, (1, 1))
+        self.assertFalse(zero_result.any())
+
+        assert_array_almost_equal(Broadening.mesh_gaussian(sigma=2,
+                                                           points=np.array([0, 1]),
+                                                           center=0),
+                                  np.array([0.199471, 0.176033]))
+        assert_array_almost_equal(Broadening.mesh_gaussian(sigma=np.array([[2], [2]]),
+                                                           points=np.array([0, 1, 2]),
+                                                           center=np.array([[0], [1]])),
+                                  np.array([[0.199471, 0.176033, 0.120985],
+                                            [0.176033, 0.199471, 0.176033]]))
+
+    def test_mesh_gaussian_sum(self):
+        """Check sum of mesh_gaussian is correctly adapted to bin width"""
+        # Note that larger bin widths will not sum to 1 with this theoretical normalisation factor; this is a
+        # consequence of directly evaluating the Gaussian function. For coarse bins, consider using the "normal" kernel
+        # which does not have this error.
+        for bin_width in 0.1, 0.35:
+            points = np.arange(-20, 20, bin_width)
+            curve = Broadening.mesh_gaussian(sigma=0.4, points=points)
+
+            self.assertAlmostEqual(sum(curve), 1)
+
+    def test_normal_sum(self):
+        """Check that normally-distributed kernel sums to unity"""
+        # Note that unlike Gaussian kernel, this totals intensity 1 even with absurdly large bins
+
+        for bin_width in 0.1, 0.35, 3.1, 5:
+            bins = np.arange(-20, 20, bin_width)
+            curve = Broadening.normal(sigma=0.4, bins=bins)
+
+            self.assertAlmostEqual(sum(curve), 1)
+
+    def test_broaden_spectrum_values(self):
+        """Check broadening implementations give similar values"""
+
+        # Use dense bins with a single peak for fair comparison
+        npts = 1000
+        bins = np.linspace(0, 100, npts + 1)
+        freq_points = (bins[1:] + bins[:-1]) / 2
+        sigma = freq_points * 0.1 + 1
+        s_dft = np.zeros(npts)
+        s_dft[npts // 2] = 2
+
+        schemes = ['gaussian', 'gaussian_truncated',
+                   'normal', 'normal_truncated',
+                   'interpolate']
+
+        results = {}
+        for scheme in schemes:
+            _, results[scheme] = Broadening.broaden_spectrum(
+                freq_points, bins, s_dft, sigma, scheme)
+
+        for scheme in schemes:
+            # Interpolate scheme is approximate so just check a couple of sig.fig.
+            if scheme == 'interpolate':
+                places = 3
+            else:
+                places = 6
+            self.assertAlmostEqual(results[scheme][(npts // 2) + 20],
+                                   0.01257,
+                                   places=places)
+
+    def test_out_of_bounds(self):
+        """Check schemes allowing arbitrary placement can handle data beyond bins"""
+        frequencies = np.array([2000.])
+        bins = np.linspace(0, 100, 100)
+        s_dft = np.array([1.])
+        sigma = np.array([3.])
+
+        schemes = ['none',
+                   'gaussian', 'gaussian_truncated',
+                   'normal', 'normal_truncated']
+
+        for scheme in schemes:
+            Broadening.broaden_spectrum(frequencies, bins, s_dft, sigma, scheme=scheme)
+
+    def test_broadening_normalisation(self):
+        """Check broadening implementations do not change overall intensity"""
+        np.random.seed(0)
+
+        # Use a strange bin width to catch bin-width-dependent behaviour
+        bins = np.linspace(0, 5000, 2000)
+
+        def sigma_func(frequencies):
+            return 2 + frequencies * 1e-2
+
+        n_peaks = 10
+        frequencies = np.random.random(n_peaks) * 4000
+        sigma = sigma_func(frequencies)
+        s_dft = np.random.random(n_peaks) * 10
+
+        pre_broadening_total = sum(s_dft)
+
+        # The 'none' and full 'gaussian' schemes should preserve the total intensity
+        for scheme in ('none', 'gaussian'):
+            freq_points, spectrum = Broadening.broaden_spectrum(
+                frequencies, bins, s_dft, sigma, scheme=scheme)
+            self.assertAlmostEqual(sum(spectrum),
+                                   pre_broadening_total,)
+
+        # Normal scheme reproduces area as well as total
+        freq_points, full_spectrum = Broadening.broaden_spectrum(
+            frequencies, bins, s_dft, sigma, scheme='normal')
+        self.assertAlmostEqual(np.trapz(full_spectrum, x=freq_points),
+                               pre_broadening_total * (bins[1] - bins[0]))
+        self.assertAlmostEqual(sum(full_spectrum), pre_broadening_total)
+
+        # truncated forms will be a little off but shouldn't be _too_ off
+        for scheme in ('gaussian_truncated', 'normal_truncated'):
+
+            freq_points, trunc_spectrum = Broadening.broaden_spectrum(
+                frequencies, bins, s_dft, sigma, scheme)
+            self.assertLess(abs(sum(full_spectrum) - sum(trunc_spectrum)) / sum(full_spectrum),
+                                0.03)
+
+        # Interpolated methods need histogram input and smooth sigma
+        hist_spec, _ = np.histogram(frequencies, bins, weights=s_dft)
+        hist_sigma = sigma_func(freq_points)
+        freq_points, interp_spectrum = Broadening.broaden_spectrum(
+            freq_points, bins, hist_spec, hist_sigma, scheme='interpolate')
+        self.assertLess(abs(sum(interp_spectrum) - pre_broadening_total) / pre_broadening_total,
+                            0.05)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/AbinsCalculateDWSingleCrystalTest.py b/scripts/test/Abins/AbinsCalculateDWSingleCrystalTest.py
similarity index 100%
rename from scripts/test/AbinsCalculateDWSingleCrystalTest.py
rename to scripts/test/Abins/AbinsCalculateDWSingleCrystalTest.py
diff --git a/scripts/test/AbinsCalculatePowderTest.py b/scripts/test/Abins/AbinsCalculatePowderTest.py
similarity index 100%
rename from scripts/test/AbinsCalculatePowderTest.py
rename to scripts/test/Abins/AbinsCalculatePowderTest.py
diff --git a/scripts/test/AbinsCalculateQToscaTest.py b/scripts/test/Abins/AbinsCalculateQToscaTest.py
similarity index 100%
rename from scripts/test/AbinsCalculateQToscaTest.py
rename to scripts/test/Abins/AbinsCalculateQToscaTest.py
diff --git a/scripts/test/AbinsCalculateSPowderTest.py b/scripts/test/Abins/AbinsCalculateSPowderTest.py
similarity index 100%
rename from scripts/test/AbinsCalculateSPowderTest.py
rename to scripts/test/Abins/AbinsCalculateSPowderTest.py
diff --git a/scripts/test/AbinsCalculateSingleCrystalTest.py b/scripts/test/Abins/AbinsCalculateSingleCrystalTest.py
similarity index 100%
rename from scripts/test/AbinsCalculateSingleCrystalTest.py
rename to scripts/test/Abins/AbinsCalculateSingleCrystalTest.py
diff --git a/scripts/test/AbinsDWSingleCrystalDataTest.py b/scripts/test/Abins/AbinsDWSingleCrystalDataTest.py
similarity index 100%
rename from scripts/test/AbinsDWSingleCrystalDataTest.py
rename to scripts/test/Abins/AbinsDWSingleCrystalDataTest.py
diff --git a/scripts/test/AbinsFrequencyPowderGeneratorTest.py b/scripts/test/Abins/AbinsFrequencyPowderGeneratorTest.py
similarity index 100%
rename from scripts/test/AbinsFrequencyPowderGeneratorTest.py
rename to scripts/test/Abins/AbinsFrequencyPowderGeneratorTest.py
diff --git a/scripts/test/AbinsIOmoduleTest.py b/scripts/test/Abins/AbinsIOmoduleTest.py
similarity index 100%
rename from scripts/test/AbinsIOmoduleTest.py
rename to scripts/test/Abins/AbinsIOmoduleTest.py
diff --git a/scripts/test/Abins/AbinsInstrumentTest.py b/scripts/test/Abins/AbinsInstrumentTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7fa7d2f2cb3e2a4d5f0439f7f304dc74dbdfbfe
--- /dev/null
+++ b/scripts/test/Abins/AbinsInstrumentTest.py
@@ -0,0 +1,25 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2019 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+from __future__ import (absolute_import, division, print_function)
+import unittest
+
+from AbinsModules.Instruments.Instrument import Instrument
+
+
+class AbinsInstrumentTest(unittest.TestCase):
+    def test_instrument_notimplemented(self):
+        instrument = Instrument()
+
+        with self.assertRaises(NotImplementedError):
+            instrument.calculate_q_powder()
+
+        with self.assertRaises(NotImplementedError):
+            instrument.convolve_with_resolution_function()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/AbinsKpointsDataTest.py b/scripts/test/Abins/AbinsKpointsDataTest.py
similarity index 100%
rename from scripts/test/AbinsKpointsDataTest.py
rename to scripts/test/Abins/AbinsKpointsDataTest.py
diff --git a/scripts/test/AbinsLoadCASTEPTest.py b/scripts/test/Abins/AbinsLoadCASTEPTest.py
similarity index 92%
rename from scripts/test/AbinsLoadCASTEPTest.py
rename to scripts/test/Abins/AbinsLoadCASTEPTest.py
index 800a9ef8d73c1af1db8ddd27a008d0f1743bd1dd..f623bcce5182db8e82efc6bfefafc54a12b3243a 100644
--- a/scripts/test/AbinsLoadCASTEPTest.py
+++ b/scripts/test/Abins/AbinsLoadCASTEPTest.py
@@ -20,14 +20,13 @@ class AbinsLoadCASTEPTest(unittest.TestCase, AbinsModules.GeneralLoadAbInitioTes
 
         with self.assertRaises(ValueError):
             # noinspection PyUnusedLocal
-            poor_castep_reader = AbinsModules.LoadCASTEP(input_ab_initio_filename=1)
+            AbinsModules.LoadCASTEP(input_ab_initio_filename=1)
 
     def tearDown(self):
         AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCASTEP"])
 
-
-#  *************************** USE CASES ********************************************
-# ===================================================================================
+    #  *************************** USE CASES ********************************************
+    # ===================================================================================
     # | Use case: Gamma point calculation and sum correction enabled during calculations|
     # ===================================================================================
     _gamma_sum = "squaricn_sum_LoadCASTEP"
@@ -44,7 +43,7 @@ class AbinsLoadCASTEPTest(unittest.TestCase, AbinsModules.GeneralLoadAbInitioTes
         self.check(name=self._gamma_no_sum, loader=AbinsModules.LoadCASTEP)
 
     # ===================================================================================
-    # | Use case: more than one k-point and sum correction       |
+    # | Use case: more than one k-point and sum correction                              |
     # ===================================================================================
     _many_k_sum = "Si2-phonon_LoadCASTEP"
 
diff --git a/scripts/test/AbinsLoadCRYSTALTest.py b/scripts/test/Abins/AbinsLoadCRYSTALTest.py
similarity index 95%
rename from scripts/test/AbinsLoadCRYSTALTest.py
rename to scripts/test/Abins/AbinsLoadCRYSTALTest.py
index 4a4968e658ab4733012b6595124f34fc2e348cb6..14256b6b27c9df2e14acca0686bf0da274abc508 100644
--- a/scripts/test/AbinsLoadCRYSTALTest.py
+++ b/scripts/test/Abins/AbinsLoadCRYSTALTest.py
@@ -6,7 +6,6 @@
 # SPDX - License - Identifier: GPL - 3.0 +
 from __future__ import (absolute_import, division, print_function)
 import unittest
-from mantid.simpleapi import logger
 import AbinsModules
 
 
@@ -15,7 +14,7 @@ class AbinsLoadCRYSTALTest(unittest.TestCase, AbinsModules.GeneralLoadAbInitioTe
     def tearDown(self):
         AbinsModules.AbinsTestHelpers.remove_output_files(list_of_names=["LoadCRYSTAL"])
 
-        #  *************************** USE CASES ********************************************
+    # *************************** USE CASES *********************************************
     # ===================================================================================
     # | Use cases: Gamma point calculation for CRYSTAL                                  |
     # ===================================================================================
diff --git a/scripts/test/AbinsLoadDMOL3Test.py b/scripts/test/Abins/AbinsLoadDMOL3Test.py
similarity index 100%
rename from scripts/test/AbinsLoadDMOL3Test.py
rename to scripts/test/Abins/AbinsLoadDMOL3Test.py
diff --git a/scripts/test/AbinsLoadGAUSSIANTest.py b/scripts/test/Abins/AbinsLoadGAUSSIANTest.py
similarity index 100%
rename from scripts/test/AbinsLoadGAUSSIANTest.py
rename to scripts/test/Abins/AbinsLoadGAUSSIANTest.py
diff --git a/scripts/test/AbinsPowderDataTest.py b/scripts/test/Abins/AbinsPowderDataTest.py
similarity index 100%
rename from scripts/test/AbinsPowderDataTest.py
rename to scripts/test/Abins/AbinsPowderDataTest.py
diff --git a/scripts/test/Abins/CMakeLists.txt b/scripts/test/Abins/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0b0e82fe5ea12b5ec0ea1507fd5f37629a6dd264
--- /dev/null
+++ b/scripts/test/Abins/CMakeLists.txt
@@ -0,0 +1,23 @@
+# Tests for Abins INS simulation
+set ( TEST_PY_FILES
+   AbinsAtomsDataTest.py
+   AbinsBroadeningTest.py
+   AbinsCalculateDWSingleCrystalTest.py
+   AbinsCalculatePowderTest.py
+   AbinsCalculateQToscaTest.py
+   AbinsCalculateSingleCrystalTest.py
+   AbinsCalculateSPowderTest.py
+   AbinsDWSingleCrystalDataTest.py
+   AbinsFrequencyPowderGeneratorTest.py
+   AbinsInstrumentTest.py
+   AbinsIOmoduleTest.py
+   AbinsKpointsDataTest.py
+   AbinsLoadCASTEPTest.py
+   AbinsLoadCRYSTALTest.py
+   AbinsLoadDMOL3Test.py
+   AbinsLoadGAUSSIANTest.py
+   AbinsPowderDataTest.py
+  )
+
+pyunittest_add_test(${CMAKE_CURRENT_SOURCE_DIR} python.scripts
+  ${TEST_PY_FILES})
diff --git a/scripts/test/Abins/__init__.py b/scripts/test/Abins/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/test/CMakeLists.txt b/scripts/test/CMakeLists.txt
index 3bc1e07fd377d880236a3b64e6fc8907ddc59a55..482ef7c0b5c93d0fc06b387340e855701607e3e5 100644
--- a/scripts/test/CMakeLists.txt
+++ b/scripts/test/CMakeLists.txt
@@ -1,19 +1,4 @@
 set(TEST_PY_FILES
-    AbinsAtomsDataTest.py
-    AbinsCalculateDWSingleCrystalTest.py
-    AbinsCalculatePowderTest.py
-    AbinsCalculateQToscaTest.py
-    AbinsCalculateSingleCrystalTest.py
-    AbinsCalculateSPowderTest.py
-    AbinsDWSingleCrystalDataTest.py
-    AbinsFrequencyPowderGeneratorTest.py
-    AbinsIOmoduleTest.py
-    AbinsKpointsDataTest.py
-    AbinsLoadCASTEPTest.py
-    AbinsLoadCRYSTALTest.py
-    AbinsLoadDMOL3Test.py
-    AbinsLoadGAUSSIANTest.py
-    AbinsPowderDataTest.py
     AbsorptionShapesTest.py 	
     ConvertToWavelengthTest.py
     CrystalFieldMultiSiteTest.py
@@ -60,6 +45,7 @@ unset(PYUNITTEST_QT_API)
 
 
 # Additional tests
+add_subdirectory(Abins)
 add_subdirectory(directtools)
 add_subdirectory(MultiPlotting)
 add_subdirectory(Muon)