Commit 99dee52e authored by Tung, Chi-Huan's avatar Tung, Chi-Huan
Browse files

first commit

parent c5a91649
Loading
Loading
Loading
Loading
Loading
+10 −2
Original line number Diff line number Diff line
@@ -3,12 +3,20 @@ FROM ubuntu:20.04
RUN DEBIAN_FRONTEND="noninteractive" apt-get update && apt-get -y install tzdata

RUN apt-get update \
  && apt-get install -y software-properties-common \
  && add-apt-repository ppa:deadsnakes/ppa \
  && apt-get update \
  && apt-get install -y \
      python3 \
      python3.9 \
      python3-pip \
	  python3-dev \
	  python3-venv \
  && apt-get clean

RUN pip3 install scipy numpy matplotlib
RUN pip3 install pip==21.3.1
RUN pip3 install scipy numpy matplotlib lmfit
RUN pip3 install --upgrade tensorflow
RUN pip3 install tensorrt

COPY /src /src

galaxy/tool_conf.xml

0 → 100644
+151 −0
Original line number Diff line number Diff line
<?xml version='1.0' encoding='utf-8'?>
<toolbox monitor="true">
  <label id="neutrons" text="Neutrons" version="" />
    <section id="neutrons_getext" name="Get Data">
      <tool file="neutrons/import_oncat.xml" />
      <tool file="data_source/upload.xml" />
      <tool hidden="true" file="neutrons/register.xml" />
      <tool file="neutrons/ingress.xml" />
    </section>

    <section id="neutrons_tests" name="Neutrons Test Tools">
      <tool file="neutrons/remote_command.xml"/>
      <tool file="neutrons/file_head.xml"/>
      <tool file="neutrons/fractal.xml"/>
      <tool file="neutrons/monitor.xml" />
    </section>
    
    <section id="neutrons_asrp" name="Automated Structural Refinement">
      <tool file="neutrons/asrp_single_peak_fitting.xml"/>
      <tool file="neutrons/asrp_pattern_matching.xml"/>
      <tool file="neutrons/asrp_gsas2_refinement.xml"/>
      <tool file="neutrons/asrp_diffpy_cmi_refinement.xml"/>
    </section>
    
    <section id="neutrons_ins" name="Inelastic Neutron Scattering">
      <tool file="neutrons/qclimax.xml"/>
      <tool file="neutrons/qclimaxIni.xml"/>
    </section>
    
    <section id="neutrons_dca" name="Advanced Neutron Data Analysis for Quantum Materials">
      <tool file="neutrons/dca_convergence.xml" />
      <tool file="neutrons/dca_binning.xml" />
      <tool file="neutrons/dca_s_of_q_omega.xml" />
      <tool file="neutrons/dca_analysis_to_maxent.xml" />
      <tool file="neutrons/dca_maxent.xml" />
      <tool file="neutrons/dca_maxent_plot.xml" />
      <tool file="neutrons/rpa_mrpapp.xml" />
      <tool file="neutrons/rpa_plot_chirpa.xml" />
    </section>
    
    <section id="neutrons_mcu" name="Monte Carlo Ray Tracing">
      <tool file="neutrons/mcu_get_instrument.xml" />
      <tool file="neutrons/mcu_incident_beamline_configurator.xml" />
      <tool file="neutrons/mcu_incident_beamline_simulator.xml" />
      <tool file="neutrons/mcu_mcpl_tool.xml" />
      <tool file="neutrons/interactivetool_reduce120.xml" />
      <tool file="neutrons/mcu_sample_simulator.xml" />
    </section>
    
    <section id="ndip_datared" name="Data Reduction">
      <tool file="neutrons/usans.xml"/>
      <tool file="neutrons/gpsans.xml"/>
      <tool file="neutrons/topaz_reduce.xml"/>
      <tool file="neutrons/topaz_combine.xml"/>
    </section>
    
    <section id="ndip_amml" name="Atomistic modeling and machine learning">
      <tool file="neutrons/amml_cp2k.xml"/>
      <tool file="neutrons/amml_cp2k_convert.xml"/>
      <tool file="neutrons/amml_train.xml"/>
      <tool file="neutrons/amml_lammps.xml"/>
      <tool file="neutrons/amml_oclimax.xml"/>
      <tool file="neutrons/amml_pclimax.xml"/>
    </section>
    
    <section id="neutrons_interactivetools" name="Interactive Tools">
      <tool file="neutrons/interactivetool_generic_output.xml" />
      <tool file="neutrons/ct_reconstruction.xml" />
      <tool file="neutrons/interactivetool_paraview.xml" />
    </section>
    
    <section id="neutrons_amira" name= "Amira Visualization">
      <tool file="neutrons/interactivetool_thinlinc.xml" />
    </section>
    
    <section id="neutrons_misc" name="Miscellaneous">
      <tool file="neutrons/get_experiment_location.xml" />
    </section>
      
    <section id="ndip_mlsans" name="MLSANS">
	  <tool file="neutrons/mlsans_fit_Yukawa_sq.xml" />
    </section>


  <label id="generic" text="Generic" version="" />

  <section id="send" name="Send Data">
    <tool file="data_export/send.xml" />
    <tool file="data_export/export_remote.xml" />
    <tool file="neutrons/export.xml"  />
  </section>

  <section id="collection_operations" name="Collection Operations">
    <tool file="${model_tools_path}/unzip_collection.xml" />
    <tool file="${model_tools_path}/zip_collection.xml" />
    <tool file="${model_tools_path}/filter_failed_collection.xml" />
    <tool file="${model_tools_path}/filter_empty_collection.xml" />
    <tool file="${model_tools_path}/flatten_collection.xml" />
    <tool file="${model_tools_path}/merge_collection.xml" />
    <tool file="${model_tools_path}/relabel_from_file.xml" />
    <tool file="${model_tools_path}/filter_from_file.xml" />
    <tool file="${model_tools_path}/sort_collection_list.xml" />
    <tool file="${model_tools_path}/tag_collection_from_file.xml" />
    <tool file="${model_tools_path}/apply_rules.xml" />
    <tool file="${model_tools_path}/build_list.xml" />
    <tool file="${model_tools_path}/build_list_1.2.0.xml" />
    <tool file="${model_tools_path}/extract_dataset.xml" />
  </section>

  <section id="expression_tools" name="Expression Tools">
    <tool file="expression_tools/parse_values_from_file.xml"/>
  </section>

  <section id="textutil" name="Text Manipulation">
    <tool file="filters/fixedValueColumn.xml" />
    <tool file="filters/catWrapper.xml" />
    <tool file="filters/cutWrapper.xml" />
    <tool file="filters/mergeCols.xml" />
    <tool file="filters/convert_characters.xml" />
    <tool file="filters/CreateInterval.xml" />
    <tool file="filters/cutWrapper.xml" />
    <tool file="filters/changeCase.xml" />
    <tool file="filters/pasteWrapper.xml" />
    <tool file="filters/remove_beginning.xml" />
    <tool file="filters/randomlines.xml" />
    <tool file="filters/headWrapper.xml" />
    <tool file="filters/tailWrapper.xml" />
    <tool file="filters/trimmer.xml" />
    <tool file="filters/wc_gnu.xml" />
    <tool file="filters/secure_hash_message_digest.xml" />
  </section>

  <section id="filter" name="Filter and Sort">
    <tool file="stats/filtering.xml" />
    <tool file="filters/sorter.xml" />
    <tool file="filters/grep.xml" />
    <tool file="filters/grep_1.0.1.xml"/>
  </section>

  <section id="group" name="Join, Subtract and Group">
    <tool file="filters/joiner.xml" />
    <tool file="filters/compare.xml" />
    <tool file="stats/grouping.xml" />
  </section>

  <section id="stats" name="Statistics">
    <tool file="stats/gsummary.xml" />
    <tool file="filters/uniq.xml" />
  </section>

</toolbox>
+28 −0
Original line number Diff line number Diff line
<tool id="mlsans_fit_Yukawa_sq" name="fit_IQ"  profile="22.04" version="0.1.1">
  <description>Fit the input I(Q) curve with a Yukawa structure-factor model</description>
    <requirements>
        <container type="docker">code.ornl.gov:4567/ndip/tool-sources/ml-assisted-sans-data-analysis/plot_iq:0.1</container>
    </requirements>
    <command detect_errors="exit_code"><![CDATA[
        python3 /src/plotiq.py -i $input $C $I_inc $sigma $d_sigma $phi $kappa $A 
    ]]></command>
    <inputs>
        <param name="input" type="data" optional="false" label="ASCII data"/>
		<param name="C" type="text" value="5000, 1000, 8000" label="C" optional="false" help="Contrast"/>
        <param name="I_inc" type="text" value="0.2, 0.1, 0.5" label="I_inc" optional="false" help="Incoherent background"/>
        <param name="sigma" type="text" value="1, 0.9, 1.1" label="σ" optional="false" help="Particle diameter"/>
		<param name="d_sigma" type="text" value="0.05, 0.01, 0.1" label="d_σ" optional="false" help="Particle size polydispersity"/>
		<param name="phi" type="text" value="0.2, 0.1, 0.5" label="Φ" optional="false" help="Volume fraction"/>
        <param name="kappa" type="text" value="0.1, 0.01, 0.5" label="1/κD" optional="false" help="Screening constant"/>
        <param name="A" type="text" value="10, 1, 20" label="βA" optional="false" help="Repulsion strength"/>
    </inputs>
	
    <!-- <outputs>
        <data name="output" format="png" label="LogLog Plot">
        </data>
    </outputs> -->
    <help><![CDATA[
        Fit the input I(Q) curve
]]></help>
</tool>

src/SQ_NN.py

0 → 100644
+201 −0
Original line number Diff line number Diff line
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp

# import tensorrt as trt
import tensorflow as tf

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 

# --------------------------------------
# Augmented decoder network
class Decoder_aug(tf.keras.Model):
    """Decoder with an auxiliary "augmentation" front-end network.

    ``aug_layers`` maps a 3-component parameter vector to 6 values that
    ``encode`` splits into a 3-dim mean and a 3-dim log-variance (VAE style);
    ``decoder_layers`` maps a ``latent_dim`` vector to a flat ``sq_dim``-point
    curve through a dense layer and two transposed 1-D convolutions.
    """

    def __init__(self, latent_dim, sq_dim):
        super(Decoder_aug,self).__init__()
        self.latent_dim = latent_dim
        
        # Augmentation network: 3 inputs -> 6 outputs (split later into
        # mean/logvar by encode()).
        # NOTE(review): input_shape=(3) is the int 3, not the 1-tuple (3,) —
        # confirm the Keras version in use accepts this form.
        model_aug = tf.keras.Sequential(
        [
            tf.keras.layers.InputLayer(input_shape=(3)),
            tf.keras.layers.Dense(6, 
                        kernel_regularizer = None,
                        name='dense_in'),
            tf.keras.layers.Dense(6, 
                        kernel_regularizer = None,
                        name='dense_in2'),
        ]
        )
        
        # Decoder network: latent vector -> dense -> (50, 32) feature map ->
        # stride-2 Conv1DTranspose upsampling -> flat sq_dim-point curve.
        model_decoder = tf.keras.Sequential(
        [
            tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
            tf.keras.layers.Dense(
                50*32, activation=tf.nn.relu, 
                kernel_regularizer = None,
                name='dense_de'),
            tf.keras.layers.Reshape(target_shape=(50, 32)),
            tf.keras.layers.Conv1DTranspose(
                filters=32, kernel_size=3, strides=2, padding='same', activation='relu',
                kernel_regularizer = None,
                name='conv1dtrs_de'),
            tf.keras.layers.Conv1DTranspose(
                filters=1, kernel_size=3, strides=1, padding='same'),
            tf.keras.layers.Reshape((sq_dim,))
        ]
        )
        
        
        self.aug_layers = model_aug
        self.decoder_layers = model_decoder
        
    @tf.function
    def sample(self, eps=None):
        """Decode latent samples to curves; draws 100 standard-normal latents
        when *eps* is not given."""
        if eps is None:
            eps = tf.random.normal(shape=(100, self.latent_dim))
        return self.decode(eps, apply_sigmoid=True)
    
    def encode(self, x):
        """Split the augmentation-network output into (mean, logvar) halves."""
        mean, logvar = tf.split(self.aug_layers(x), num_or_size_splits=2, axis=1)
        return mean, logvar
        
    def reparameterize(self, mean, logvar):
        """Standard VAE reparameterization: mean + eps * exp(logvar / 2)."""
        eps = tf.random.normal(shape=mean.shape)
        return eps * tf.exp(logvar * .5) + mean
    
    def decode(self, z, apply_sigmoid=False):
        """Run the decoder on latent *z*; optionally squash the logits to
        (0, 1) with a sigmoid."""
        logits = self.decoder_layers(z)
        if apply_sigmoid:
            probs = tf.sigmoid(logits)
            return probs
        return logits
    
    @tf.function
    def sample_normal(self, x):
        """Monte-Carlo estimate (100 latent draws) of the decoded curve's
        mean and standard deviation for input parameters *x*."""
        mean, logvar = self.encode(x)
        eps = tf.random.normal(shape=(100, self.latent_dim))
#         z_samples = [e*tf.exp(logvar*.5) + mean for e in eps]
#         logits_samples = [self.decode(z, apply_sigmoid=True) for z in z_samples]
#         z_samples = eps*tf.exp(logvar*.5) + mean
#         logits_samples = self.decode(z_samples, apply_sigmoid=True)
        def zsample(e):
            return e*tf.exp(logvar*.5) + mean
        z_samples = tf.map_fn(zsample,eps)
        def logitsample(z):
            return self.decode(z, apply_sigmoid=True)
        logits_samples = tf.map_fn(logitsample,z_samples)
        
        logits_std = tf.math.reduce_std(logits_samples,axis=0)
        logits_mean = tf.math.reduce_mean(logits_samples,axis=0)
        
        return logits_mean, logits_std
    
    @tf.function
    def sample_mean(self, x):
        """Deterministic prediction: decode the latent mean (no sampling)."""
        mean, logvar = self.encode(x)        
        logits_mean = self.decode(mean, apply_sigmoid=True)
        
        return logits_mean

# --------------------------------------
# load trained model
# Instantiate the architecture with the dimensions used at training time
# (3 latent parameters, 100-point S(Q) grid), then restore the trained
# weights for both sub-networks from checkpoints shipped under /src.
model = Decoder_aug(latent_dim=3, sq_dim=100)

export_path_aug = '/src/saved_model/SQ_fg_aug/'
model_name_aug = 'SQ_fg_aug_test'
export_name_aug = export_path_aug + model_name_aug

export_path_decoder = '/src/saved_model/SQ_fg_decoder/'
model_name_decoder = 'SQ_fg_decoder_test'
export_name_decoder = export_path_decoder + model_name_decoder

# NOTE(review): load_weights returns a checkpoint-load status object, not a
# layer; reassigning via the private `_root` attribute relies on TF internals
# and may break across TF versions — confirm this round-trip is intended.
aug_layers_loaded = model.aug_layers.load_weights(export_name_aug, by_name=False, skip_mismatch=False, options=None)
model.aug_layers = aug_layers_loaded._root

decoder_layers_loaded = model.decoder_layers.load_weights(export_name_decoder, by_name=False, skip_mismatch=False, options=None)
model.decoder_layers = decoder_layers_loaded._root

# --------------------------------------
# Rescaling
# Transform the input to tensorflow tensor
def to_tf(arg):
    """Convert *arg* (array-like) to a float32 TensorFlow tensor."""
    return tf.convert_to_tensor(arg, dtype=tf.float32)

# rescale the training set SQ to range [0,1]
# log(S(Q)) values are assumed to span roughly [-exp_scale, exp_scale];
# dividing by 2*exp_scale and shifting by 0.5 maps them into [0, 1].
exp_scale = 6
def f_inp(sq):
    """Map an S(Q) curve into the [0, 1] range the network was trained on."""
    return tf.math.log(sq)/exp_scale/2 + 0.5

# transform the decoder output to SQ
def f_out(sq_pred, scale=None):
    """Invert ``f_inp``: map a [0, 1] network output back to an S(Q) curve.

    Parameters
    ----------
    sq_pred : array-like or float
        Network output in the rescaled [0, 1] range.
    scale : float, optional
        Log-range used by ``f_inp``; defaults to the module-level
        ``exp_scale`` (generalized from the previously hard-coded global).

    Returns
    -------
    numpy value/array: exp((sq_pred*2 - 1) * scale), the inverse of f_inp.
    """
    if scale is None:
        scale = exp_scale
    return np.exp((sq_pred * 2 - 1) * scale)  # inverse of f_inp

@tf.function
def f_out_tf(predictions):
    """Graph-compiled inverse of f_inp (same mapping as f_out, in TF ops)."""
    return tf.math.exp((predictions*2-1)*exp_scale)

# --------------------------------------
# smoothing using GP
def RBF(d, lmbda):
    """Squared-exponential (RBF) kernel: exp(-d^2 / (2 * lmbda^2))."""
    exponent = -d**2/2/lmbda**2
    return np.exp(exponent)

def mldivide(A, B):
    """MATLAB-style A \\ B: least-squares solve of A x = B using the
    Moore-Penrose pseudoinverse."""
    A_pinv = np.linalg.pinv(A)
    return A_pinv.dot(B)

def sm_GP(f, lmbda, sigma):
    """Gaussian-process smoothing of a 100-point curve *f* sampled on the
    fixed grid q = 0.2, 0.4, ..., 20.0 used throughout this module.

    *lmbda* is the RBF length scale and *sigma* the assumed observation
    noise. Returns the GP posterior mean evaluated at the same 100 points.
    """
    q_rs = (np.arange(100) + 1) * 0.2
    # Pairwise signed distances between grid points.
    d_ij = np.subtract.outer(q_rs, q_rs)

    K = RBF(d_ij, lmbda)
    K_s = K                                 # test points coincide with inputs
    K_y = K + np.eye(100) * sigma**2        # add noise on the diagonal

    # Standard Cholesky-based GP regression: solve K_y alpha = f.
    L = np.linalg.cholesky(K_y)
    alpha = mldivide(L.T, mldivide(L, f))

    return K_s.T @ alpha

# --------------------------------------
# Scattering function
def SQ_NN(parameters, GP=False, lmbda=0.5, sigma=0.1):
    """Predict a 100-point S(Q) curve from 3 physical parameters using the
    pretrained decoder; optionally GP-smooth the raw network output.

    Returns a float64 numpy array of length 100.
    """
    # Z-score the inputs with the training-set label mean and std.
    mean = np.array([0.2325,0.2600,13.0000])
    std = np.sqrt(np.array([0.0169,0.0208,52.0000]))
    parameters_z = [(parameters[i]-mean[i])/std[i] for i in range(3)]

    x = tf.reshape(to_tf(parameters_z), (1, 3))
    sample_mean = model.sample_mean(x)[0]

    if GP:
        smoothed = sm_GP(sample_mean - 0.5, lmbda, sigma) + 0.5
        return f_out_tf(smoothed).numpy().reshape(100).astype('float64')
    return f_out_tf(sample_mean).numpy().reshape(100).astype('float64')

def SQ_NN_tf(parameters, GP=False, lmbda=0.5, sigma=0.1):
    """TensorFlow-tensor variant of SQ_NN: same prediction pipeline, but the
    result is returned as a TF tensor (no .numpy() conversion).

    Parameters
    ----------
    parameters : sequence of 3 floats
        Physical parameters; z-scored with the training-set mean/std below.
    GP : bool
        If True, smooth the raw network output with a Gaussian process.
    lmbda, sigma : float
        GP length scale and noise level (only used when GP=True).
    """
    # mean and std of the training set labels
    mean = np.array([0.2325,0.2600,13.0000])
    std = np.sqrt(np.array([0.0169,0.0208,52.0000]))
    parameters_z = [(parameters[i]-mean[i])/std[i] for i in range(3)]

    x = tf.reshape(to_tf(parameters_z),(1,3))
    sample_mean = model.sample_mean(x)[0]

    if not GP:
        return f_out_tf(sample_mean)
    # NOTE(review): sm_GP runs in numpy, so the GP=True path breaks gradient
    # flow through the smoothing step — confirm this is acceptable.
    # (Removed a dead `SQ = f_out_tf(sample_mean)` that ran the transform and
    # discarded the result on every call.)
    sample_mean_GP = sm_GP(sample_mean-0.5,lmbda,sigma)+0.5
    return f_out_tf(sample_mean_GP)
# --------------------------------------
+6.14 KiB

File added.

No diff preview for this file type.

Loading