diff --git a/demo/demo_mace3D.py b/demo/demo_mace3D.py index 76897aaa631d6792a81c5ec80a4085a96f02e2a0..4e4bf05e6c5e00581ab9e802aef5bea085aae16b 100644 --- a/demo/demo_mace3D.py +++ b/demo/demo_mace3D.py @@ -47,7 +47,8 @@ denoiser_name = denoiser_type # destination path to download and extract the phantom and NN weight files. target_dir = './demo_data/' # path to store output recon images -output_dir = './output/mace3D/' +save_path = './output/mace3D/' +os.makedirs(save_path, exist_ok=True) # Geometry parameters dist_source_detector = 839.0472 # Distance between the X-ray source and the detector in units of ALU @@ -62,6 +63,7 @@ sino_noise_sigma = 0.01 # transmission noise level # MACE recon parameters max_admm_itr = 10 # max ADMM iterations for MACE reconstruction +sharpness = 1.0 prior_weight = 0.5 # cumulative weights for three prior agents. # ######### End of parameters ######### @@ -125,15 +127,15 @@ if denoiser_type == 'dncnn_ct': # Load denoiser model structure and weights denoiser_model = denoiser_utils.DenoiserCT(checkpoint_dir=os.path.join(denoiser_path, 'model_dncnn_ct')) - # Define the denoiser using this model. This version requires some interface code to match with MACE. def denoiser(img_noisy): img_noisy = np.expand_dims(img_noisy, axis=1) + upper_range = denoiser_utils.calc_upper_range(img_noisy) + img_noisy = img_noisy/upper_range testData_obj = denoiser_utils.DataLoader(img_noisy) - denoiser_model.denoise(testData_obj) - img_denoised = np.stack(testData_obj.outData, axis=0) + img_denoised = denoiser_model.denoise(testData_obj, batch_size=256) + img_denoised = img_denoised*upper_range return np.squeeze(img_denoised) - # DnCNN denoiser in Keras. This denoiser model is trained on natural images. elif denoiser_type == 'dncnn_keras': print("Denoiser function: DnCNN trained on natural images.") @@ -163,7 +165,9 @@ recon_mace = mbircone.mace.mace3D(sino_noisy, angles, dist_source_detector, magn denoiser=denoiser, denoiser_args=(), max_admm_itr=max_admm_itr, prior_weight=prior_weight, delta_pixel_detector=delta_pixel_detector, - weight_type='transmission') + weight_type='transmission', + sharpness=sharpness, + save_path=save_path) recon_shape = recon_mace.shape print("Reconstruction shape = ", recon_shape) @@ -173,27 +177,23 @@ print("Reconstruction shape = ", recon_shape) # ########################################################################### print("Post processing MACE reconstruction results ...") # Save recon results as a numpy array -os.makedirs(output_dir, exist_ok=True) -np.save(os.path.join(output_dir, "recon_mace.npy"), recon_mace) - -# Plot sinogram data -demo_utils.plot_image(sino_noisy[0], title='sinogram view 0, noise level=0.05', - filename=os.path.join(output_dir, 'sino_noisy.png'), vmin=0, vmax=4) -demo_utils.plot_image(sino[0], title='clean sinogram view 0', filename=os.path.join(output_dir, 'sino_clean.png'), - vmin=0, vmax=4) -demo_utils.plot_image(noise[0], title='sinogram additive Gaussian noise, view 0', - filename=os.path.join(output_dir, 'sino_transmission_noise.png'), vmin=-0.08, vmax=0.08) +np.save(os.path.join(save_path, "recon_mace.npy"), recon_mace) +# load qGGMRF recon +recon_qGGMRF = np.load(os.path.join(save_path, "recon_qGGMRF.npy")) # Plot axial slices of phantom and recon -display_slices = [2, recon_shape[0] // 2] +display_slices = [7, 10, 13, 16, 19, 22] for display_slice in display_slices: demo_utils.plot_image(phantom[display_slice], title=f'phantom, axial slice {display_slice}', - filename=os.path.join(output_dir, 
f'phantom_slice{display_slice}.png'), vmin=0, vmax=0.5) + filename=os.path.join(save_path, f'phantom_slice{display_slice}.png'), vmin=0, vmax=0.5) demo_utils.plot_image(recon_mace[display_slice], title=f'MACE reconstruction, axial slice {display_slice}', - filename=os.path.join(output_dir, f'recon_mace_slice{display_slice}.png'), vmin=0, vmax=0.5) + filename=os.path.join(save_path, f'recon_mace_slice{display_slice}.png'), vmin=0, vmax=0.5) + demo_utils.plot_image(recon_qGGMRF[display_slice], title=f'qGGMRF reconstruction, axial slice {display_slice}', + filename=os.path.join(save_path, f'recon_qGGMRF_slice{display_slice}.png'), vmin=0, vmax=0.5) # Plot 3D phantom and recon image volumes as gif images. -demo_utils.plot_gif(phantom, output_dir, 'phantom', vmin=0, vmax=0.5) -demo_utils.plot_gif(recon_mace, output_dir, 'recon_mace', vmin=0, vmax=0.5) +demo_utils.plot_gif(phantom, save_path, 'phantom', vmin=0, vmax=0.5) +demo_utils.plot_gif(recon_mace, save_path, 'recon_mace', vmin=0, vmax=0.5) +demo_utils.plot_gif(recon_qGGMRF, save_path, 'recon_qGGMRF', vmin=0, vmax=0.5) input("press Enter") diff --git a/demo/demo_mace3D_fast.py b/demo/demo_mace3D_fast.py index b911d1232d11698ae8ef174c15fe963b2f98e883..7c2e79539bf188366ee9e1f52d0bce8ff5256e1b 100644 --- a/demo/demo_mace3D_fast.py +++ b/demo/demo_mace3D_fast.py @@ -49,21 +49,22 @@ denoiser_name = denoiser_type # destination path to download and extract the phantom and NN weight files. target_dir = './demo_data/' # path to store output recon images -output_dir = './output/mace3D_fast/' +save_path = './output/mace3D_fast/' # Geometry parameters dist_source_detector = 839.0472 # Distance between the X-ray source and the detector in units of ALU magnification = 5.572128439964856 # magnification = (source to detector distance)/(source to center-of-rotation distance) delta_pixel_detector = 0.25 # Scalar value of detector pixel spacing in units of ALU -num_det_rows = 14 # number of detector rows +num_det_rows = 29 # number of detector rows num_det_channels = 120 # number of detector channels # Simulated sinogram parameters -num_views = 75 # number of projection views -sino_noise_sigma = 0.01 # transmission noise level +num_views = 50 # number of projection views +sino_noise_sigma = 0.005 # transmission noise level # MACE recon parameters max_admm_itr = 10 # max ADMM iterations for MACE reconstruction +sharpness = 1.0 prior_weight = 0.5 # cumulative weights for three prior agents. # ######### End of parameters ######### @@ -99,11 +100,7 @@ print("shape of original phantom = ", phantom_orig.shape) (Nz, Nx, Ny) = phantom_orig.shape Nx_ds = Nx // 2 + 1 Ny_ds = Ny // 2 + 1 -Nz_ds = Nz // 2 phantom = demo_utils.image_resize(phantom_orig, (Nx_ds, Ny_ds)) - -# Take first half of the slices to form the downsampled phantom. -phantom = phantom[:Nz_ds] print("shape of downsampled phantom = ", phantom.shape) @@ -142,11 +139,12 @@ if denoiser_type == 'dncnn_ct': # Define the denoiser using this model. This version requires some interface code to match with MACE. def denoiser(img_noisy): img_noisy = np.expand_dims(img_noisy, axis=1) + upper_range = denoiser_utils.calc_upper_range(img_noisy) + img_noisy = img_noisy/upper_range testData_obj = denoiser_utils.DataLoader(img_noisy) - denoiser_model.denoise(testData_obj) - img_denoised = np.stack(testData_obj.outData, axis=0) + img_denoised = denoiser_model.denoise(testData_obj, batch_size=256) + img_denoised = img_denoised*upper_range return np.squeeze(img_denoised) - # DnCNN denoiser in Keras. 
This denoiser model is trained on natural images. elif denoiser_type == 'dncnn_keras': print("Denoiser function: DnCNN trained on natural images.") @@ -175,8 +173,10 @@ print("Performing MACE reconstruction ...") recon_mace = mbircone.mace.mace3D(sino_noisy, angles, dist_source_detector, magnification, denoiser=denoiser, denoiser_args=(), max_admm_itr=max_admm_itr, prior_weight=prior_weight, + sharpness=sharpness, delta_pixel_detector=delta_pixel_detector, - weight_type='transmission') + weight_type='transmission', + save_path=save_path) recon_shape = recon_mace.shape print("Reconstruction shape = ", recon_shape) @@ -186,28 +186,23 @@ print("Reconstruction shape = ", recon_shape) # ########################################################################### print("Post processing MACE reconstruction results ...") # Save recon results as a numpy array -os.makedirs(output_dir, exist_ok=True) -np.save(os.path.join(output_dir, "recon_mace.npy"), recon_mace) - -# Plot sinogram data -demo_utils.plot_image(sino_noisy[0], title='sinogram view 0, noise level=0.05', - filename=os.path.join(output_dir, 'sino_noisy.png'), vmin=0, vmax=4) -demo_utils.plot_image(sino[0], title='clean sinogram view 0', filename=os.path.join(output_dir, 'sino_clean.png'), - vmin=0, vmax=4) -demo_utils.plot_image(noise[0], title='sinogram additive Gaussian noise, view 0', - filename=os.path.join(output_dir, 'sino_transmission_noise.png'), vmin=-0.08, vmax=0.08) +os.makedirs(save_path, exist_ok=True) +np.save(os.path.join(save_path, "recon_mace.npy"), recon_mace) +# load qGGMRF recon +recon_qGGMRF = np.load(os.path.join(save_path, "recon_qGGMRF.npy")) # Plot axial slices of phantom and recon -display_slices = [2, recon_shape[0] // 2] +display_slices = [7, 10, 13, 16, 19, 22] for display_slice in display_slices: demo_utils.plot_image(phantom[display_slice], title=f'phantom, axial slice {display_slice}', - filename=os.path.join(output_dir, f'phantom_slice{display_slice}.png'), vmin=0, vmax=0.5) + filename=os.path.join(save_path, f'phantom_slice{display_slice}.png'), vmin=0, vmax=0.5) demo_utils.plot_image(recon_mace[display_slice], title=f'MACE reconstruction, axial slice {display_slice}', - filename=os.path.join(output_dir, f'recon_mace_slice{display_slice}.png'), vmin=0, vmax=0.5) - + filename=os.path.join(save_path, f'recon_mace_slice{display_slice}.png'), vmin=0, vmax=0.5) + demo_utils.plot_image(recon_qGGMRF[display_slice], title=f'qGGMRF reconstruction, axial slice {display_slice}', + filename=os.path.join(save_path, f'recon_qGGMRF_slice{display_slice}.png'), vmin=0, vmax=0.5) # Plot 3D phantom and recon image volumes as gif images. 
-demo_utils.plot_gif(phantom_orig, output_dir, 'phantom_original', vmin=0, vmax=0.5) -demo_utils.plot_gif(phantom, output_dir, 'phantom_resized', vmin=0, vmax=0.5) -demo_utils.plot_gif(recon_mace, output_dir, 'recon_mace', vmin=0, vmax=0.5) +demo_utils.plot_gif(phantom, save_path, 'phantom_resized', vmin=0, vmax=0.5) +demo_utils.plot_gif(recon_mace, save_path, 'recon_mace', vmin=0, vmax=0.5) +demo_utils.plot_gif(recon_qGGMRF, save_path, 'recon_qGGMRF', vmin=0, vmax=0.5) input("press Enter") diff --git a/demo/demo_mace4D.py b/demo/demo_mace4D.py new file mode 100644 index 0000000000000000000000000000000000000000..43a3a08c2c24a93e4864df1a71fb5d89c2799c7b --- /dev/null +++ b/demo/demo_mace4D.py @@ -0,0 +1,290 @@ +import os, sys +import numpy as np +import argparse +import urllib.request +import tarfile +from keras.models import model_from_json +import getpass +from psutil import cpu_count +from scipy import ndimage +import mbircone +import demo_utils, denoiser_utils +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + + +if __name__=='__main__': + """ + This script is a demonstration of the mace4D reconstruction algorithm. Demo functionality includes + * downloading 3D phantom and denoiser data from specified urls + * generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point ... + * obtaining a cluster ticket with get_cluster_ticket(). + * Performing multinode computation with the cluster ticket. This includes: + * generating sinogram data by projecting each phantom at each timepoint, and then adding transmission noise + * performing a 4D MACE reconstruction. + """ + print('This script is a demonstration of the mace4D reconstruction algorithm. Demo functionality includes \ + \n\t * downloading 3D phantom and denoiser data from specified urls. \ + \n\t * generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point. \ + \n\t * obtaining a cluster ticket with get_cluster_ticket(). \ + \n\t * Performing multinode computation with the cluster ticket. This includes: \ + \n\t * generating sinogram data by projecting each phantom at each timepoint, and then adding transmission noise \ + \n\t * performing a 4D MACE reconstruction.') + + # ########################################################################### + # Set the parameters to get the data and do the recon + # ########################################################################### + + # Change the parameters below for your own use case. + + # Denoiser function to be used in MACE. For the built-in demo, this should be one of dncnn_keras or dncnn_ct + # Other denoisers built in keras can be used with minimal modification by putting the architecture and weights + # in model.json and model.hdf5 in the denoiser_path set below + denoiser_type = 'dncnn_ct' + + # The url to the data repo. + data_repo_url = 'https://github.com/cabouman/mbir_data/raw/master/' + + # Download url to the index file. + # This file will be used to retrieve urls to files that we are going to download + yaml_url = os.path.join(data_repo_url, 'index.yaml') + + # Choice of phantom and denoiser files. + # These should be valid choices specified in the index file. + # The urls to phantom data and NN weights will be parsed from data_repo_url and the choices of phantom and denoiser specified below. + phantom_name = 'bottle_cap_3D' + denoiser_name = denoiser_type + + # destination path to download and extract the phantom and NN weight files. 
+ target_dir = './demo_data/' + # path to store output recon images + save_path = './output/mace4D/' + os.makedirs(save_path, exist_ok=True) + + # Geometry parameters + dist_source_detector = 839.0472 # Distance between the X-ray source and the detector in units of ALU + magnification = 5.572128439964856 # magnification = (source to detector distance)/(source to center-of-rotation distance) + delta_pixel_detector = 0.25 # Scalar value of detector pixel spacing in units of ALU + num_det_rows = 28 # number of detector rows + num_det_channels = 240 # number of detector channels + + # Simulated 4D phantom and sinogram parameters + num_time_points = 8 # number of time points. This is also the number of jobs that can be parallelized with multinode computation. + num_views = 75 # number of projection views + sino_noise_sigma = 0.01 # transmission noise level + + # MACE recon parameters + max_admm_itr = 10 # max ADMM iterations for MACE reconstruction + prior_weight = 0.5 # cumulative weights for three prior agents. + sharpness = 1.0 # sharpness + # ######### End of parameters ######### + + + # ########################################################################### + # Download and extract data + # ########################################################################### + + # Download the url index file and return path to local file. + index_path = demo_utils.download_and_extract(yaml_url, target_dir) + # Load the url index file as a dictionary + url_index = demo_utils.load_yaml(index_path) + # get urls to phantom and denoiser parameter file + phantom_url = os.path.join(data_repo_url, url_index['phantom'][phantom_name]) # url to download the 3D image volume phantom file + denoiser_url = os.path.join(data_repo_url, url_index['denoiser'][denoiser_name]) # url to download the denoiser parameter file + + # download phantom file + phantom_path = demo_utils.download_and_extract(phantom_url, target_dir) + # download and extract NN weights and structure files + denoiser_path = demo_utils.download_and_extract(denoiser_url, target_dir) + + + # ########################################################################### + # Load the parameters from configuration file to obtain a cluster ticket. + # ########################################################################### + + # Ask for a configuration file to obtain a cluster ticket to access a parallel cluster. + # If the configuration file is not provided, it will automatically set up a LocalCluster based on the number of + # cores in the local computer and return a ticket needed for :py:func:`~multinode.scatter_gather`. + parser = argparse.ArgumentParser(description='A demo to help users use the mbircone.multinode module on a High Performance Computer.') + parser.add_argument('--configs_path', type=str, default=None, help="Path to a configuration file.") + args = parser.parse_args() + save_config_dir = './configs/multinode/' + + + # ########################################################################### + # Get cluster object for multi-node computation + # ########################################################################### + is_multinode = demo_utils.query_yes_no("Are you on a multinode cluster?", default="y") + if not is_multinode: + print("Running computation on a single node machine. 
There is no multinode parallelization in this case.") + num_physical_cores = cpu_count(logical=False) + cluster_ticket = mbircone.multinode.get_cluster_ticket('LocalHost', num_physical_cores_per_node=num_physical_cores) + else: + print("Running computation on a multinode cluster.") + if args.configs_path is None: + print("Config file path not provided. Please provide the correct answers to the following questions, so that we could set up the multinode computation:") + # create output folder + os.makedirs(save_config_dir, exist_ok=True) + # Create a configuration dictionary for a cluster ticket, by collecting required information from terminal. + configs = demo_utils.create_cluster_ticket_configs(save_config_dir=save_config_dir) + save_config_path = os.path.join(save_config_dir, 'default.yaml') + input(f"Cluster config file saved at {save_config_path}. Press Enter to continue:") + else: + # Load cluster setup parameter. + print(f"Loading cluster information from {args.configs_path}") + configs = demo_utils.load_yaml(args.configs_path) + + cluster_ticket = mbircone.multinode.get_cluster_ticket( + job_queue_system_type=configs['job_queue_system_type'], + num_physical_cores_per_node=configs['cluster_params']['num_physical_cores_per_node'], + num_nodes=configs['cluster_params']['num_nodes'], + maximum_memory_per_node=configs['cluster_params']['maximum_memory_per_node'], + maximum_allowable_walltime=configs['cluster_params']['maximum_allowable_walltime'], + system_specific_args=configs['cluster_params']['system_specific_args'], + local_directory=configs['cluster_params']['local_directory'], + log_directory=configs['cluster_params']['log_directory']) + + print(cluster_ticket) + + + # ########################################################################### + # Load phantom + # ########################################################################### + print("Loading 3D phantom volume ...") + + # load original phantom + phantom_3D = np.load(phantom_path) + print("shape of 3D phantom = ", phantom_3D.shape) + + # ########################################################################### + # Generate a 4D phantom. + # ########################################################################### + print("Generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point ...") + + # Create the rotation angles and argument lists, and distribute to workers. + phantom_rot_angle_list = np.linspace(0, 40, num_time_points, endpoint=False) # Phantom rotation angles. + phantom_4D = np.array([ndimage.rotate(phantom_3D, + phantom_rot_angle, + order=5, + mode='constant', + axes=(1, 2), + reshape=False) + for phantom_rot_angle in phantom_rot_angle_list]) + + print("shape of 4D phantom = ", phantom_4D.shape) + # ########################################################################### + # Generate sinogram + # ########################################################################### + + print("****Multinode computation with Dask****: Generating sinogram data by projecting each phantom at each timepoint ...") + # scatter_gather parallel computes mbircone.cone3D.project + # Generate sinogram data by projecting each phantom in phantom list. + # Create the projection angles and argument lists, and distribute to workers. + angles = np.linspace(0, 2 * np.pi, num_views, endpoint=False) # Same for all time points. + # After setting the geometric parameter, the shape of the input phantom should be equal to the calculated + # geometric parameter. 
Providing a phantom with the wrong shape will cause errors in the C code. + variable_args_list = [{'image': phantom_rot} for phantom_rot in phantom_4D] + constant_args = {'angles': angles, + 'num_det_rows': num_det_rows, + 'num_det_channels': num_det_channels, + 'dist_source_detector': dist_source_detector, + 'magnification': magnification, + 'delta_pixel_detector': delta_pixel_detector} + sino_list = mbircone.multinode.scatter_gather(cluster_ticket, + mbircone.cone3D.project, + constant_args=constant_args, + variable_args_list=variable_args_list, verbose=1) + sino = np.array(sino_list) + weights = mbircone.cone3D.calc_weights(sino, weight_type='transmission') + + # Add transmission noise + noise = sino_noise_sigma * 1. / np.sqrt(weights) * np.random.normal(size=sino.shape) + sino_noisy = sino + noise + + + # ########################################################################### + # Set up the denoiser + # ########################################################################### + # This demo includes a custom CNN trained on CT images and a generic DnCNN in keras + # The choice is set in the parameter section above. + print("Loading denoiser function and model ...") + + # DnCNN denoiser trained on CT images. This is the denoiser that we recommend using. + if denoiser_type == 'dncnn_ct': + print("Denoiser function: custom DnCNN trained on CT images.") + + # Load denoiser model structure and weights + denoiser_model = denoiser_utils.DenoiserCT(checkpoint_dir=os.path.join(denoiser_path, 'model_dncnn_ct')) + + # Define the denoiser using this model. This version requires some interface code to match with MACE. + def denoiser(img_noisy): + upper_range = denoiser_utils.calc_upper_range(img_noisy) + img_noisy = img_noisy/upper_range + testData_obj = denoiser_utils.DataLoader(img_noisy) + img_denoised = denoiser_model.denoise(testData_obj, batch_size=256) + img_denoised = img_denoised*upper_range + return np.squeeze(img_denoised) + + # DnCNN denoiser in Keras. This denoiser model is trained on natural images. + elif denoiser_type == 'dncnn_keras': + print("Denoiser function: DnCNN trained on natural images.") + + # Load denoiser model structure and weights + json_path = os.path.join(denoiser_path, 'model_dncnn_keras/model.json') # model architecture file + weight_path = os.path.join(denoiser_path, 'model_dncnn_keras/model.hdf5') # model weight file + with open(json_path, 'r') as json_file: + denoiser_model = model_from_json(json_file.read()) # load model architecture + + denoiser_model.load_weights(weight_path) # load model weights + + # Define the denoiser using this model. + def denoiser(img_noisy): + img_denoised = denoiser_model.predict(img_noisy) # inference + return np.squeeze(img_denoised) + else: + raise RuntimeError('Unknown denoiser_type. 
Should be either dncnn_ct or dncnn_keras.') + + + # ########################################################################### + # Perform MACE reconstruction + # ########################################################################### + print("****Multinode computation with Dask****: Performing MACE reconstruction ...") + recon_mace = mbircone.mace.mace4D(sino_noisy, angles, dist_source_detector, magnification, + denoiser=denoiser, denoiser_args=(), + max_admm_itr=max_admm_itr, prior_weight=prior_weight, + cluster_ticket=cluster_ticket, + delta_pixel_detector=delta_pixel_detector, + weight_type='transmission', + sharpness=sharpness, + save_path=save_path) + recon_shape = recon_mace.shape + print("Reconstruction shape = ", recon_shape) + + + # ########################################################################### + # Post-process reconstruction results + # ########################################################################### + print("Post processing MACE reconstruction results ...") + # Save recon results as a numpy array + np.save(os.path.join(save_path, "recon_mace.npy"), recon_mace) + # load qGGMRF recon + + recon_qGGMRF = np.load(os.path.join(save_path, "recon_qGGMRF.npy")) + # Plot axial slices of phantom and recon + display_slices = [7, 10, 13, 16, 19, 22] + for t in range(num_time_points//4,3*num_time_points//4): + for display_slice in display_slices: + demo_utils.plot_image(phantom_4D[t,display_slice,:,:], + title=f"phantom, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"phantom_slice{display_slice}_time{t}.png"), + vmin=0, vmax=0.5) + demo_utils.plot_image(recon_mace[t,display_slice,:,:], title=f"MACE reconstruction, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"recon_mace_slice{display_slice}_time{t}.png"), vmin=0, vmax=0.5) + demo_utils.plot_image(recon_qGGMRF[t,display_slice,:,:], title=f"qGGMRF reconstruction, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"recon_qGGMRF_slice{display_slice}_time{t}.png"), vmin=0, vmax=0.5) + + # Plot 3D phantom and recon image volumes as gif images. + demo_utils.plot_gif(phantom_4D[t], save_path, f'phantom_resized_{t}', vmin=0, vmax=0.5) + demo_utils.plot_gif(recon_mace[t], save_path, f'recon_mace_{t}', vmin=0, vmax=0.5) + demo_utils.plot_gif(recon_qGGMRF[t], save_path, f'recon_qGGMRF_{t}', vmin=0, vmax=0.5) + + print(f"Reconstruction results saved in {save_path}") diff --git a/demo/demo_mace4D_fast.py b/demo/demo_mace4D_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..ac52c9728d77c0024464cef7041be66c62339e93 --- /dev/null +++ b/demo/demo_mace4D_fast.py @@ -0,0 +1,302 @@ +import os, sys +import numpy as np +import argparse +import urllib.request +import tarfile +from keras.models import model_from_json +import getpass +from psutil import cpu_count +from scipy import ndimage +import mbircone +import demo_utils, denoiser_utils +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + + +if __name__=='__main__': + """ + This script is a fast demonstration of the mace4D reconstruction algorithm. Demo functionality includes + * downloading 3D phantom and denoiser data from specified urls + * generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point ... + * downsampling the phantom along all three dimensions + * obtaining a cluster ticket with get_cluster_ticket(). + * Performing multinode computation with the cluster ticket. 
This includes: + * generating sinogram data by projecting each phantom at each timepoint, and then adding transmission noise + * performing a 4D MACE reconstruction. + """ + print('This script is a demonstration of the mace4D reconstruction algorithm. Demo functionality includes \ + \n\t * downloading 3D phantom and denoiser data from specified urls. \ + \n\t * generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point. \ + \n\t * downsampling the phantom along all three dimensions \ + \n\t * obtaining a cluster ticket with get_cluster_ticket(). \ + \n\t * Performing multinode computation with the cluster ticket. This includes: \ + \n\t * generating sinogram data by projecting each phantom at each timepoint, and then adding transmission noise \ + \n\t * performing a 4D MACE reconstruction.') + + # ########################################################################### + # Set the parameters to get the data and do the recon + # ########################################################################### + + # Change the parameters below for your own use case. + + # Denoiser function to be used in MACE. For the built-in demo, this should be one of dncnn_keras or dncnn_ct + # Other denoisers built in keras can be used with minimal modification by putting the architecture and weights + # in model.json and model.hdf5 in the denoiser_path set below + denoiser_type = 'dncnn_ct' + + # The url to the data repo. + data_repo_url = 'https://github.com/cabouman/mbir_data/raw/master/' + + # Download url to the index file. + # This file will be used to retrieve urls to files that we are going to download + yaml_url = os.path.join(data_repo_url, 'index.yaml') + + # Choice of phantom and denoiser files. + # These should be valid choices specified in the index file. + # The urls to phantom data and NN weights will be parsed from data_repo_url and the choices of phantom and denoiser specified below. + phantom_name = 'bottle_cap_3D' + denoiser_name = denoiser_type + + # destination path to download and extract the phantom and NN weight files. + target_dir = './demo_data/' + # path to store output recon images + save_path = './output/mace4D_fast/' + os.makedirs(save_path, exist_ok=True) + + # Geometry parameters + dist_source_detector = 839.0472 # Distance between the X-ray source and the detector in units of ALU + magnification = 5.572128439964856 # magnification = (source to detector distance)/(source to center-of-rotation distance) + delta_pixel_detector = 0.25 # Scalar value of detector pixel spacing in units of ALU + num_det_rows = 29 # number of detector rows + num_det_channels = 120 # number of detector channels + + # Simulated 4D phantom and sinogram parameters + num_time_points = 8 # number of time points. This is also number of jobs that can be parallellized with multinode computation. + num_views = 50 # number of projection views + sino_noise_sigma = 0.005 # transmission noise level + + # MACE recon parameters + max_admm_itr = 10 # max ADMM iterations for MACE reconstruction + prior_weight = 0.5 # cumulative weights for three prior agents. + sharpness = 1.0 + # ######### End of parameters ######### + + + # ########################################################################### + # Download and extract data + # ########################################################################### + + # Download the url index file and return path to local file. 
+ index_path = demo_utils.download_and_extract(yaml_url, target_dir) + # Load the url index file as a dictionary + url_index = demo_utils.load_yaml(index_path) + # get urls to phantom and denoiser parameter file + phantom_url = os.path.join(data_repo_url, url_index['phantom'][phantom_name]) # url to download the 3D image volume phantom file + denoiser_url = os.path.join(data_repo_url, url_index['denoiser'][denoiser_name]) # url to download the denoiser parameter file + + # download phantom file + phantom_path = demo_utils.download_and_extract(phantom_url, target_dir) + # download and extract NN weights and structure files + denoiser_path = demo_utils.download_and_extract(denoiser_url, target_dir) + + + # ########################################################################### + # Load the parameters from configuration file to obtain a cluster ticket. + # ########################################################################### + + # Ask for a configuration file to obtain a cluster ticket to access a parallel cluster. + # If the configuration file is not provided, it will automatically set up a LocalCluster based on the number of + # cores in the local computer and return a ticket needed for :py:func:`~multinode.scatter_gather`. + parser = argparse.ArgumentParser(description='A demo to help users use the mbircone.multinode module on a High Performance Computer.') + parser.add_argument('--configs_path', type=str, default=None, help="Path to a configuration file.") + args = parser.parse_args() + save_config_dir = './configs/multinode/' + + + # ########################################################################### + # Get cluster object for multi-node computation + # ########################################################################### + is_multinode = demo_utils.query_yes_no("Are you on a multinode cluster?", default="y") + if not is_multinode: + print("Running computation on a single node machine. There is no multinode parallelization in this case.") + num_physical_cores = cpu_count(logical=False) + cluster_ticket = mbircone.multinode.get_cluster_ticket('LocalHost', num_physical_cores_per_node=num_physical_cores) + else: + print("Running computation on a multinode cluster.") + if args.configs_path is None: + print("Config file path not provided. Please provide the correct answers to the following questions, so that we could set up the multinode computation:") + # create output folder + os.makedirs(save_config_dir, exist_ok=True) + # Create a configuration dictionary for a cluster ticket, by collecting required information from terminal. + configs = demo_utils.create_cluster_ticket_configs(save_config_dir=save_config_dir) + save_config_path = os.path.join(save_config_dir, 'default.yaml') + input(f"Cluster config file saved at {save_config_path}. Press Enter to continue:") + else: + # Load cluster setup parameter. 
+ print(f"Loading cluster information from {args.configs_path}") + configs = demo_utils.load_yaml(args.configs_path) + + cluster_ticket = mbircone.multinode.get_cluster_ticket( + job_queue_system_type=configs['job_queue_system_type'], + num_physical_cores_per_node=configs['cluster_params']['num_physical_cores_per_node'], + num_nodes=configs['cluster_params']['num_nodes'], + maximum_memory_per_node=configs['cluster_params']['maximum_memory_per_node'], + maximum_allowable_walltime=configs['cluster_params']['maximum_allowable_walltime'], + system_specific_args=configs['cluster_params']['system_specific_args'], + local_directory=configs['cluster_params']['local_directory'], + log_directory=configs['cluster_params']['log_directory']) + + print(cluster_ticket) + + + # ########################################################################### + # Load phantom + # ########################################################################### + print("Loading 3D phantom volume ...") + + # load original phantom + phantom_3D = np.load(phantom_path) + print("shape of original 3D phantom = ", phantom_3D.shape) + + # downsample the original phantom along slice axis + (Nz, Nx, Ny) = phantom_3D.shape + Nx_ds = Nx // 2 + 1 + Ny_ds = Ny // 2 + 1 + #Nz_ds = Nz // 2 + phantom_3D = demo_utils.image_resize(phantom_3D, (Nx_ds, Ny_ds)) + #phantom_3D = phantom_3D[:Nz_ds] + # Take first half of the slices to form the downsampled phantom. + print("shape of downsampled 3D phantom = ", phantom_3D.shape) + + # ########################################################################### + # Generate a 4D phantom. + # ########################################################################### + print("Generating 4D simulated data by rotating the 3D phantom with positive angular steps with each time point ...") + + # Create the rotation angles and argument lists, and distribute to workers. + phantom_rot_angle_list = np.linspace(0, 40, num_time_points, endpoint=False) # Phantom rotation angles. + phantom_4D = np.array([ndimage.rotate(phantom_3D, + phantom_rot_angle, + order=5, + mode='constant', + axes=(1, 2), + reshape=False) + for phantom_rot_angle in phantom_rot_angle_list]) + + print("shape of 4D phantom = ", phantom_4D.shape) + # ########################################################################### + # Generate sinogram + # ########################################################################### + + print("****Multinode computation with Dask****: Generating sinogram data by projecting each phantom at each timepoint ...") + # scatter_gather parallel computes mbircone.cone3D.project + # Generate sinogram data by projecting each phantom in phantom list. + # Create the projection angles and argument lists, and distribute to workers. + angles = np.linspace(0, 2 * np.pi, num_views, endpoint=False) # Same for all time points. + # After setting the geometric parameter, the shape of the input phantom should be equal to the calculated + # geometric parameter. Input a phantom with wrong shape will generate a bunch of issue in C. 
+ variable_args_list = [{'image': phantom_rot} for phantom_rot in phantom_4D] + constant_args = {'angles': angles, + 'num_det_rows': num_det_rows, + 'num_det_channels': num_det_channels, + 'dist_source_detector': dist_source_detector, + 'magnification': magnification, + 'delta_pixel_detector': delta_pixel_detector} + sino_list = mbircone.multinode.scatter_gather(cluster_ticket, + mbircone.cone3D.project, + constant_args=constant_args, + variable_args_list=variable_args_list, verbose=1) + sino = np.array(sino_list) + weights = mbircone.cone3D.calc_weights(sino, weight_type='transmission') + + # Add transmission noise + noise = sino_noise_sigma * 1. / np.sqrt(weights) * np.random.normal(size=sino.shape) + sino_noisy = sino + noise + + + # ########################################################################### + # Set up the denoiser + # ########################################################################### + # This demo includes a custom CNN trained on CT images and a generic DnCNN in keras + # The choice is set in the parameter section above. + print("Loading denoiser function and model ...") + + # DnCNN denoiser trained on CT images. This is the denoiser that we recommend using. + if denoiser_type == 'dncnn_ct': + print("Denoiser function: custom DnCNN trained on CT images.") + + # Load denoiser model structure and weights + denoiser_model = denoiser_utils.DenoiserCT(checkpoint_dir=os.path.join(denoiser_path, 'model_dncnn_ct')) + + # Define the denoiser using this model. This version requires some interface code to match with MACE. + def denoiser(img_noisy): + upper_range = denoiser_utils.calc_upper_range(img_noisy) + img_noisy = img_noisy/upper_range + testData_obj = denoiser_utils.DataLoader(img_noisy) + img_denoised = denoiser_model.denoise(testData_obj, batch_size=256) + img_denoised = img_denoised*upper_range + return np.squeeze(img_denoised) + + # DnCNN denoiser in Keras. This denoiser model is trained on natural images. + elif denoiser_type == 'dncnn_keras': + print("Denoiser function: DnCNN trained on natural images.") + + # Load denoiser model structure and weights + json_path = os.path.join(denoiser_path, 'model_dncnn_keras/model.json') # model architecture file + weight_path = os.path.join(denoiser_path, 'model_dncnn_keras/model.hdf5') # model weight file + with open(json_path, 'r') as json_file: + denoiser_model = model_from_json(json_file.read()) # load model architecture + + denoiser_model.load_weights(weight_path) # load model weights + + # Define the denoiser using this model. + def denoiser(img_noisy): + img_denoised = denoiser_model.predict(img_noisy) # inference + return np.squeeze(img_denoised) + else: + raise RuntimeError('Unkown denoiser_type. 
Should be either dncnn_ct or dncnn_keras.') + + + # ########################################################################### + # Perform MACE reconstruction + # ########################################################################### + print("****Multinode computation with Dask****: Performing MACE reconstruction ...") + recon_mace = mbircone.mace.mace4D(sino_noisy, angles, dist_source_detector, magnification, + denoiser=denoiser, denoiser_args=(), + max_admm_itr=max_admm_itr, prior_weight=prior_weight, + cluster_ticket=cluster_ticket, + delta_pixel_detector=delta_pixel_detector, + weight_type='transmission', + sharpness=sharpness, + save_path=save_path) + recon_shape = recon_mace.shape + print("Reconstruction shape = ", recon_shape) + + + # ########################################################################### + # Post-process reconstruction results + # ########################################################################### + print("Post processing MACE reconstruction results ...") + # Save recon results as a numpy array + np.save(os.path.join(save_path, "recon_mace.npy"), recon_mace) + # load qGGMRF recon results + recon_qGGMRF = np.load(os.path.join(save_path, "recon_qGGMRF.npy")) + + # Plot axial slices of phantom and recon + display_slices = [7, 10, 13, 16, 19, 22] + for t in range(num_time_points//4,3*num_time_points//4): + for display_slice in display_slices: + demo_utils.plot_image(phantom_4D[t,display_slice,:,:], + title=f"phantom, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"phantom_slice{display_slice}_time{t}.png"), + vmin=0, vmax=0.5) + demo_utils.plot_image(recon_mace[t,display_slice,:,:], title=f"MACE reconstruction, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"recon_mace_slice{display_slice}_time{t}.png"), vmin=0, vmax=0.5) + demo_utils.plot_image(recon_qGGMRF[t,display_slice,:,:], title=f"qGGMRF reconstruction, axial slice {display_slice}, time point {t}", + filename=os.path.join(save_path, f"recon_qGGMRF_slice{display_slice}_time{t}.png"), vmin=0, vmax=0.5) + + # Plot 3D phantom and recon image volumes as gif images. + demo_utils.plot_gif(phantom_4D[t], save_path, f'phantom_resized_{t}', vmin=0, vmax=0.5) + demo_utils.plot_gif(recon_mace[t], save_path, f'recon_mace_{t}', vmin=0, vmax=0.5) + demo_utils.plot_gif(recon_qGGMRF[t], save_path, f'recon_qGGMRF_{t}', vmin=0, vmax=0.5) + + print(f"Reconstruction results saved in {save_path}") diff --git a/demo/demo_utils.py b/demo/demo_utils.py index 3dbe7482628acd740568e68eb232a7bb5e90a4ed..abf271d8c27bc3ee0f7654906da888ad3c183ff7 100644 --- a/demo/demo_utils.py +++ b/demo/demo_utils.py @@ -178,7 +178,7 @@ def image_resize(image, output_shape): image_resized = np.empty((image.shape[0], output_shape[0], output_shape[1]), dtype=image.dtype) for i in range(image.shape[0]): PIL_image = Image.fromarray(image[i]) - PIL_image_resized = PIL_image.resize((output_shape[1], output_shape[0]), resample=Image.BILINEAR) + PIL_image_resized = PIL_image.resize((output_shape[1], output_shape[0]), resample=Image.BICUBIC) image_resized[i] = np.array(PIL_image_resized) return image_resized @@ -240,11 +240,10 @@ def download_and_extract(download_url, save_dir): # Parse extracted dir and extract data if necessary return save_path - -def query_yes_no(question): +def query_yes_no(question, default="n"): """Ask a yes/no question via input() and return the answer. 
Code modified from reference: `https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input/3041990` - + Args: question (string): Question that is presented to the user. Returns: @@ -252,12 +251,12 @@ def query_yes_no(question): """ valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} - prompt = " [y/n, default=n] " + prompt = f" [y/n, default={default}] " while True: sys.stdout.write(question + prompt) choice = input().lower() if choice == "": - return False + return valid[default] elif choice in valid: return valid[choice] else: @@ -365,7 +364,7 @@ def create_cluster_ticket_configs(save_config_dir, save_config_name='default'): else: sys.stdout.write("Please Enter a positive number.\n") ask_times -= 1 - + # Ask for the maximum allowable walltime. question = '\nPlease enter the maximum allowable walltime.' prompt = 'This should be a string in the form D-HH:MM:SS. E.g., \'0-01:00:00\' for one hour.\n' @@ -373,16 +372,10 @@ def create_cluster_ticket_configs(save_config_dir, save_config_name='default'): sys.stdout.write(prompt) choice = input() - try: - validate(choice, strftime_format('%d-%H:%M:%S')) - walltime_valid = True - except ValidationError: - walltime_valid = False - - if walltime_valid: + if choice != "": config['cluster_params']['maximum_allowable_walltime'] = choice else: - sys.stdout.write("Since the entered string did not match the format, the scheduler will allocate system-determined maximum allowable walltime.\n") + config['cluster_params']['maximum_allowable_walltime'] = None # Ask for the maximum memory per node. question = '\nPlease enter the maximum memory per node. [Default = 16GB]\n' @@ -394,7 +387,7 @@ def create_cluster_ticket_configs(save_config_dir, save_config_name='default'): config['cluster_params']['maximum_memory_per_node'] = choice else: config['cluster_params']['maximum_memory_per_node'] = '16GB' - sys.stdout.write("Set maximum_memory_per_node to default value '16GB'.\n") + sys.stdout.write("Set maximum_memory_per_node to default value '16GB'.\n") # Ask for any additional arguments to pass to the job scheduling system. question = '\nPlease enter any additional arguments to pass to the job scheduling system. 
[Default = ""]\n' diff --git a/demo/denoiser_utils.py b/demo/denoiser_utils.py index ac5dc9ba11944f90ff0cac490e4a8186d01aceaf..f346d02c6badd8e12d86e489f5df780587687671 100644 --- a/demo/denoiser_utils.py +++ b/demo/denoiser_utils.py @@ -1,18 +1,36 @@ -import os import time import sys +#import tensorflow as tf import tensorflow.compat.v1 as tf tf.disable_v2_behavior() import numpy as np +from skimage.filters import gaussian + +def dncnn(network_in, is_training=True, size_z_in=1, size_z_out=1, numLayers=17, width=64, is_normalize=0): + with tf.variable_scope('block1'): + layer_out = tf.layers.conv2d(network_in, width, 3, padding='same', activation=tf.nn.relu) + + for layers in range(2, numLayers): + with tf.variable_scope('block%d' % layers): + layer_out = tf.layers.conv2d(layer_out, width, 3, padding='same', name='conv%d' % layers, use_bias=False) + # print(layer_out.shape) + layer_out = tf.nn.relu(tf.layers.batch_normalization(layer_out, training=is_training)) + + with tf.variable_scope('block17'): + layer_out = tf.layers.conv2d(layer_out, size_z_out, 3, padding='same') + + # network_out = network_in - layer_out + network_in_sliced = semi2DCNN_select_z_out_from_z_in(network_in, size_z_in, size_z_out) + network_out = network_in_sliced - layer_out + # print("denoised shape in dncnn: {}".format(network_out.shape)) + + return network_out class DenoiserCT: - """ DnCNN-CT denoiser class. - """ def __init__(self, checkpoint_dir, size_z_in=5, size_z_out=1, numLayers=17, width=64): self.checkpoint_dir = checkpoint_dir - self.sess = tf.Session(config=tf.ConfigProto()) - #self.sess = tf.compat.v1.Session() + self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9))) self.size_z_in = size_z_in self.size_z_out = size_z_out self.numLayers = numLayers @@ -22,11 +40,13 @@ class DenoiserCT: # build model self.Y_ = tf.placeholder(tf.float32, [None, None, None, self.size_z_out], name='clean_image') self.X = tf.placeholder(tf.float32, [None, None, None, self.size_z_in], name='noisy_image') - self.Y = self._dncnn() + self.Y = dncnn(self.X, is_training=self.is_training, + size_z_in=self.size_z_in, size_z_out=self.size_z_out, + numLayers=self.numLayers, width=self.width) self.R = semi2DCNN_select_z_out_from_z_in(self.X, self.size_z_in, self.size_z_out) - self.Y # residual = input - output self.loss = tf.losses.mean_squared_error(labels=self.Y_ , predictions=self.Y ) self.psnr = tf_psnr(self.Y, self.Y_, 1.0) - + optimizer = tf.train.AdamOptimizer(self.learning_rate, name='AdamOptimizer') update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): @@ -35,38 +55,33 @@ class DenoiserCT: initializer = tf.global_variables_initializer() self.sess.run(initializer) print("############################## Initialized Model Successfully...") - load_model_status = self._load() assert load_model_status == True, 'Load weights FAILED from {}'.format(self.checkpoint_dir) - + - def denoise(self, testData_obj): + def denoise(self, testData_obj, batch_size=None): """ Denoise function. This function takes a DataLoader class object as input, denoise the testData_obj.inData, and save the denoised images as testData_obj.outData. 
""" - - for idx, noisy_img in enumerate(testData_obj): + noisy_img = np.array([noisy_img[0] for noisy_img in testData_obj]) + n_img = noisy_img.shape[0] + N_batch, Nt, Nx, Ny = np.shape(testData_obj.inData) + if batch_size is None: denoised_img, _ = self.sess.run([self.Y, self.R], feed_dict={self.X: noisy_img, self.is_training: False}) - testData_obj.setOutput_current(denoised_img) - - - def _dncnn(self): - """ Network architecture DnCNN-CT - """ - with tf.variable_scope('block1'): - layer_out = tf.layers.conv2d(self.X, self.width, 3, padding='same', activation=tf.nn.relu) - - for layers in range(2, self.numLayers): - with tf.variable_scope('block%d' % layers): - layer_out = tf.layers.conv2d(layer_out, self.width, 3, padding='same', name='conv%d' % layers, use_bias=False) - layer_out = tf.nn.relu(tf.layers.batch_normalization(layer_out, training=self.is_training)) - - with tf.variable_scope('block17'): - layer_out = tf.layers.conv2d(layer_out, self.size_z_out, 3, padding='same') - - network_in_sliced = semi2DCNN_select_z_out_from_z_in(self.X, self.size_z_in, self.size_z_out) - network_out = network_in_sliced - layer_out - return network_out - + else: + denoised_img = [] + for n in range(0, n_img, batch_size): + if n+batch_size>n_img: + batch_noisy = noisy_img[n:,:,:,:] + else: + batch_noisy = noisy_img[n:n+batch_size,:,:,:] + denoised_batch, _ = self.sess.run([self.Y, self.R], feed_dict={self.X: batch_noisy, self.is_training: False}) + if not len(denoised_img): + denoised_img = np.array(denoised_batch) + else: + denoised_img = np.append(denoised_img, denoised_batch, axis=0) + denoised_img = np.reshape(denoised_img, (N_batch, -1, Nx, Ny)) + return denoised_img + def _load(self): saver = tf.train.Saver() @@ -78,8 +93,7 @@ class DenoiserCT: load_model_status = True else: load_model_status = False - - return load_model_status + return load_model_status class DataLoader: @@ -150,3 +164,24 @@ def semi2DCNN_select_z_out_from_z_in(img_patch, size_z_in, size_z_out): padSize_in1, padSize_in2 = semi2DCNN_inPad(size_z_in, size_z_out) return img_patch[:,:,:,padSize_in1:padSize_in1+size_z_out] + +def calc_upper_range(img, percentile=75, gauss_sigma=2., is_segment=True, threshold=0.5): + ''' + Given a 4D image volume of shape (Nz, Nt, Nx, Ny), calculate the image upper range. + ''' + assert(np.ndim(img)<=4), 'Error! Input image dim must be 4 or lower!' + if np.ndim(img) < 4: + print(f"{np.ndim(img)} dim image provided. 
Automatically adding axes to make the input a 4D image volume.") + for _ in range(4-np.ndim(img)): + img = np.expand_dims(img, axis=0) + Nz, Nt, _, _ = np.shape(img) + img_smooth = np.array([[gaussian(img[i,t,:,:], gauss_sigma, preserve_range=True) for t in range(Nt)] for i in range(Nz)]) + if is_segment: + img_mean = np.mean(img_smooth) + indicator = img_smooth > threshold * img_mean + else: + indicator = np.ones(img.shape) + img_upper_range = np.percentile(img_smooth[indicator], percentile) + #return img_upper_range, np.squeeze(img_smooth), np.squeeze(indicator) + return img_upper_range + diff --git a/demo/requirements_demo.txt b/demo/requirements_demo.txt index 525e0fddee56993e5efd237f43cbefbd027496f1..c8cad45c758e1984c460ea7fcd6970e83292dab1 100644 --- a/demo/requirements_demo.txt +++ b/demo/requirements_demo.txt @@ -1,8 +1,8 @@ matplotlib imageio -scipy +scipy~=1.8 tensorflow~=2.6.0 keras~=2.6.0 PyYAML datatest - +scikit-image diff --git a/dev_scripts/clean_install_conda_svmbir_docs.sh b/dev_scripts/clean_install_all.sh similarity index 100% rename from dev_scripts/clean_install_conda_svmbir_docs.sh rename to dev_scripts/clean_install_all.sh diff --git a/docs/source/mace.rst b/docs/source/mace.rst index 6ddcb046a52cd2f172b3eb88b4422ce96499d591..7225a73b887bc05c74954fd8c22f9da3266eda27 100644 --- a/docs/source/mace.rst +++ b/docs/source/mace.rst @@ -1,7 +1,7 @@ mbircone.mace --------------- .. automodule:: mbircone.mace - :members: mace3D + :members: mace3D, mace4D :undoc-members: :show-inheritance: @@ -10,3 +10,4 @@ mbircone.mace .. autosummary:: mace3D + mace4D diff --git a/mbircone/__init__.py b/mbircone/__init__.py index 4bbce929f9a2b6ea99baa9f442ce917bf32c20cc..14d6dec4805a5aae2cac03d664e9975e6a00f3ee 100644 --- a/mbircone/__init__.py +++ b/mbircone/__init__.py @@ -1,7 +1,7 @@ from .preprocess import * from .cone3D import * -from .mace import mace3D +from .mace import mace3D, mace4D from .phantom import * from .multinode import * #__all__ = ['auto_sigma_x','auto_sigma_p','auto_sigma_y', 'calc_weights', 'compute_sino_params', 'compute_img_params', 'compute_img_size', 'project', 'recon', 'pad_roi2ror', 'extract_roi_from_ror'] diff --git a/mbircone/mace.py b/mbircone/mace.py index 7bf7e22541aefac206364d4dc3b7880813107179..a7acb171ad6928251eb9332eb3efe22f2bf3b535 100644 --- a/mbircone/mace.py +++ b/mbircone/mace.py @@ -1,10 +1,12 @@ import numpy as np -import os,sys +import os import time import mbircone.cone3D as cone3D +import mbircone.multinode as multinode __lib_path = os.path.join(os.path.expanduser('~'), '.cache', 'mbircone') + def compute_inv_permute_vector(permute_vector): """ Given a permutation vector, compute its inverse permutation vector s.t. an array will have the same shape after permutation and inverse permutation. """ @@ -18,52 +20,30 @@ def compute_inv_permute_vector(permute_vector): return tuple(inv_permute_vector) -def normalize(img, image_range): - """Normalizes ``img`` from specified image range to the range of (0,1). - """ - - #print('original image range:',image_range) - img_normalized = (img-image_range[0])/(image_range[1]-image_range[0]) - #print('normalized image range:',np.percentile(img_normalized,10),np.percentile(img_normalized,90)) - return img_normalized - - -def denormalize(img_normalized, image_range): - """Denormalizes ``img_normalized`` from (0,1) to desired image range. 
- """ - - img = img_normalized*(image_range[1]-image_range[0])+image_range[0] - return img - - -def denoiser_wrapper(image_noisy, denoiser, denoiser_args, image_range, permute_vector=(0,1,2), positivity=True): +def denoiser_wrapper(image_noisy, denoiser, denoiser_args, permute_vector, positivity=True): """ This is a denoiser wrapper function. Given an image volume to be denoised, the wrapper function permutes and normalizes the image, passes it to a denoiser function, and permutes and denormalizes the denoised image back. - + Args: image_noisy (ndarray): image volume to be denoised denoiser (callable): The denoiser function to be used. - + ``denoiser(x, *denoiser_args) -> ndarray`` - + where ``x`` is an ndarray of the noisy image volume, and ``denoiser_args`` is a tuple of the fixed parameters needed to completely specify the denoiser function. denoiser_args (tuple): [Default=()] Extra arguments passed to the denoiser function. - image_range (tuple): dynamic range of reconstruction image. - permute_vector (tuple): [Default=(0,1,2)] + permute_vector (tuple): permutation on the noisy image before passing to the denoiser function. It contains a permutation of [0,1,..,N-1] where N is the number of axes of image_noisy. The i’th axis of the permuted array will correspond to the axis numbered axes[i] of image_noisy. If not specified, defaults to (0,1,2), which effectively does no permutation. + An inverse permutation is performed on the denoised image to make sure that the returned image has the same shape as the input noisy image. positivity: positivity constraint for denoiser output. If True, positivity will be enforced by clipping the denoiser output to be non-negative. - + Returns: - ndarray: denoised image with same shape and dimensionality as input image ``image_noisy`` + ndarray: denoised image with same shape and dimensionality as input image ``image_noisy`` """ - # permute the 3D image s.t. the desired denoising dimensionality is moved to axis=0 image_noisy = np.transpose(image_noisy, permute_vector) - image_noisy_norm = normalize(image_noisy, image_range) - # denoise! - image_denoised_norm = denoiser(image_noisy_norm, *denoiser_args) - # denormalize image from [0,1] to original dynamic range - image_denoised = denormalize(image_denoised_norm, image_range) + # denoise + image_denoised = denoiser(image_noisy, *denoiser_args) if positivity: image_denoised=np.clip(image_denoised, 0, None) # permute the denoised image back @@ -75,17 +55,18 @@ def denoiser_wrapper(image_noisy, denoiser, denoiser_args, image_range, permute_ def mace3D(sino, angles, dist_source_detector, magnification, denoiser, denoiser_args=(), max_admm_itr=10, rho=0.5, prior_weight=0.5, - init_image=None, image_range=None, + init_image=None, save_path=None, channel_offset=0.0, row_offset=0.0, rotation_offset=0.0, delta_pixel_detector=1.0, delta_pixel_image=None, ror_radius=None, sigma_y=None, snr_db=30.0, weights=None, weight_type='unweighted', positivity=True, p=1.2, q=2.0, T=1.0, num_neighbors=6, sharpness=0.0, sigma_x=None, sigma_p=None, max_iterations=3, stop_threshold=0.02, - num_threads=None, NHICD=False, verbose=1, lib_path=__lib_path): - """Computes 3D conebeam beam reconstruction with multi-slice MACE alogorithm by fusing forward model proximal map with 2D denoisers across xy, xz, and yz planes. 
+ num_threads=None, NHICD=False, verbose=1, + lib_path=__lib_path): + """Computes 3-D cone-beam reconstruction with the multi-slice MACE algorithm by fusing the forward model proximal map with 2D denoisers across the xy, xz, and yz planes. Required arguments: - - **sino** (*ndarray*): 3D sinogram array with shape (num_views, num_det_rows, num_det_channels) + - **sino** (*ndarray*): 3-D sinogram array with shape (num_views, num_det_rows, num_det_channels) - **angles** (*ndarray*): 1D view angles array in radians. - **dist_source_detector** (*float*): Distance between the X-ray source and the detector in units of ALU - **magnification** (*float*): Magnification of the cone-beam geometry defined as (source to detector distance)/(source to center-of-rotation distance). @@ -99,8 +80,8 @@ def mace3D(sino, angles, dist_source_detector, magnification, - **max_admm_itr** (*int*): [Default=10] Maximum number of MACE ADMM iterations. - **rho** (*float*): [Default=0.5] step size of ADMM update in MACE, range (0,1). The value of ``rho`` mainly controls the convergence speed of MACE algorithm. - **prior_weight** (*ndarray*): [Default=0.5] weights for prior agents, specified by either a scalar value or a 1D array. If a scalar is specified, then all three prior agents use the same weight of (prior_weight/3). If an array is provided, then the array should have three elements corresponding to the weight of denoisers in XY, YZ, and XZ planes respectively. The weight for forward model proximal map agent will be calculated as 1-sum(prior_weight) so that the sum of all agent weights are equal to 1. Each entry of prior_weight should have value between 0 and 1. sum(prior_weight) needs to be no greater than 1. - - **init_image** (*ndarray, optional*): [Default=None] Initial value of MACE reconstruction image, specified by either a scalar value or a 3D numpy array with shape (num_img_slices,num_img_rows,num_img_cols). If None, the inital value of MACE will be automatically determined by a qGGMRF reconstruction. - - **image_range** (*tuple*): [Default=None] dynamic range of reconstruction image. If None, the lower bound will be 0, and the upper bound will be determined by 95% pixel value of the qGGMRF reconstruction. If an init_image is provided, then image_range must be also provided. + - **init_image** (*ndarray, optional*): [Default=None] Initial value of MACE reconstruction image, specified by either a scalar value or a 3-D numpy array with shape (num_img_slices,num_img_rows,num_img_cols). If None, the initial value of MACE will be automatically determined by a qGGMRF reconstruction. + - **save_path** (*str, optional*): [Default=None] Path to directory that saves the intermediate results of MACE. Optional arguments inherited from ``cone3D.recon`` (with changing default value of ``max_iterations``): - **channel_offset** (*float, optional*): [Default=0.0] Distance in :math:`ALU` from center of detector to the source-detector line along a row. - **row_offset** (*float, optional*): [Default=0.0] Distance in :math:`ALU` from center of detector to the source-detector line along a column. @@ -110,7 +91,7 @@ def mace3D(sino, angles, dist_source_detector, magnification, - **ror_radius** (*float, optional*): [Default=None] Scalar value of radius of reconstruction in :math:`ALU`. If None, automatically set with compute_img_params. Pixels outside the radius ror_radius in the :math:`(x,y)` plane are disregarded in the reconstruction. 
- **sigma_y** (*float, optional*): [Default=None] Scalar value of noise standard deviation parameter. If None, automatically set with auto_sigma_y. - **snr_db** (*float, optional*): [Default=30.0] Scalar value that controls assumed signal-to-noise ratio of the data in dB. Ignored if sigma_y is not None. - - **weights** (*ndarray, optional*): [Default=None] 3D weights array with same shape as sino. + - **weights** (*ndarray, optional*): [Default=None] 3-D weights array with same shape as sino. - **weight_type** (*string, optional*): [Default='unweighted'] Type of noise model used for data. If the ``weights`` array is not supplied, then the function ``cone3D.calc_weights`` is used to set weights using specified ``weight_type`` parameter. - Option "unweighted" corresponds to unweighted reconstruction; @@ -133,7 +114,7 @@ def mace3D(sino, angles, dist_source_detector, magnification, - **lib_path** (*str, optional*): [Default=~/.cache/mbircone] Path to directory containing library of forward projection matrices. Returns: - 3D numpy array: 3D reconstruction with shape (num_img_slices, num_img_rows, num_img_cols) in units of :math:`ALU^{-1}`. + 3-D numpy array: 3-D reconstruction with shape (num_img_slices, num_img_rows, num_img_cols) in units of :math:`ALU^{-1}`. """ if verbose: @@ -152,9 +133,6 @@ def mace3D(sino, angles, dist_source_detector, magnification, # Calculate automatic value of sigma_p if sigma_p is None: sigma_p = cone3D.auto_sigma_p(sino, delta_pixel_detector=delta_pixel_detector, sharpness=sharpness) - # Calculate automatic value of sigma_x - if sigma_x is None: - sigma_x = cone3D.auto_sigma_x(sino, delta_pixel_detector=delta_pixel_detector, sharpness=sharpness) if init_image is None: if verbose: start = time.time() @@ -162,25 +140,18 @@ def mace3D(sino, angles, dist_source_detector, magnification, init_image = cone3D.recon(sino, angles, dist_source_detector, magnification, channel_offset=channel_offset, row_offset=row_offset, rotation_offset=rotation_offset, delta_pixel_detector=delta_pixel_detector, delta_pixel_image=delta_pixel_image, ror_radius=ror_radius, - sigma_y=sigma_y, weights=weights, + weights=weights, sigma_y=sigma_y, sigma_x=sigma_x, positivity=positivity, p=p, q=q, T=T, num_neighbors=num_neighbors, - sigma_x=sigma_x, stop_threshold=stop_threshold, + stop_threshold=stop_threshold, num_threads=num_threads, NHICD=NHICD, verbose=qGGMRF_verbose, lib_path=lib_path) if verbose: end = time.time() elapsed_t = end-start print(f"Done computing qGGMRF reconstruction. Elapsed time: {elapsed_t:.2f} sec.") - if image_range is None: - if verbose: - print("image dynamic range automatically determined by qGGMRF reconstruction.") - image_range_upper = np.percentile(init_image, 95) - image_range = [0, image_range_upper] - if verbose: - print("image dynamic range = ",image_range) - # Throw an exception if image_range is None and init_image is not None. - assert not (image_range is None), \ - 'image_range needs to be provided if an init_image is given to MACE algorithm.' 
- + if not (save_path is None): + print("Save qGGMRF reconstruction to disk.") + np.save(os.path.join(save_path, 'recon_qGGMRF.npy'), init_image) + if np.isscalar(init_image): (num_views, num_det_rows, num_det_channels) = np.shape(sino) [Nz,Nx,Ny], _ = cone3D.compute_img_size(num_views, num_det_rows, num_det_channels, @@ -204,18 +175,19 @@ def mace3D(sino, angles, dist_source_detector, magnification, beta.append(w) else: beta = [1-prior_weight,prior_weight/(image_dim),prior_weight/(image_dim),prior_weight/(image_dim)] - assert(all(w>=0 for w in beta)), 'Incorrect value of prior_weight given. All elements in prior_weight should be non-negative, and sum should be no greater than 1.' + # check that agent weights are all non-negative and sum up to 1. + assert(all(w>=0 for w in beta) and (sum(beta)-1.)<1e-5), 'Incorrect value of prior_weight given. All elements in prior_weight should be non-negative, and sum should be no greater than 1.' # make denoiser_args an instance if necessary if not isinstance(denoiser_args, tuple): denoiser_args = (denoiser_args,) - #################### begin ADMM iterations + ######################## begin ADMM iterations ######################## if verbose: print("Begin MACE ADMM iterations:") for itr in range(max_admm_itr): if verbose: print(f"Begin MACE iteration {itr}/{max_admm_itr}:") - start = time.time() + itr_start = time.time() # forward model prox map agent X[0] = cone3D.recon(sino, angles, dist_source_detector, magnification, channel_offset=channel_offset, row_offset=row_offset, rotation_offset=rotation_offset, @@ -226,29 +198,257 @@ def mace3D(sino, angles, dist_source_detector, magnification, sigma_p=sigma_p, max_iterations=max_iterations, stop_threshold=stop_threshold, num_threads=num_threads, NHICD=NHICD, verbose=qGGMRF_verbose, lib_path=lib_path) if verbose: - print(" Done forward model proximal map estimation.") + print("Done forward model proximal map estimation.") # prior model denoiser agents + denoise_start = time.time() # denoising in XY plane (along Z-axis) - X[1] = denoiser_wrapper(W[1], denoiser, denoiser_args, image_range, permute_vector=(0,1,2), positivity=positivity) - if verbose: - print(" Done denoising in XY-plane.") + X[1] = denoiser_wrapper(W[1], denoiser, denoiser_args, permute_vector=(0,1,2), positivity=positivity) # denoising in YZ plane (along X-axis) - X[2] = denoiser_wrapper(W[2], denoiser, denoiser_args, image_range, permute_vector=(1,0,2), positivity=positivity) - if verbose: - print(" Done denoising in YZ-plane.") + X[2] = denoiser_wrapper(W[2], denoiser, denoiser_args, permute_vector=(1,0,2), positivity=positivity) # denoising in XZ plane (along Y-axis) - X[3] = denoiser_wrapper(W[3], denoiser, denoiser_args, image_range, permute_vector=(2,0,1), positivity=positivity) + X[3] = denoiser_wrapper(W[3], denoiser, denoiser_args, permute_vector=(2,0,1), positivity=positivity) + denoise_end = time.time() if verbose: - print(" Done denoising in XZ-plane.") + denoise_elapsed = denoise_end - denoise_start + print(f"Done denoising in all hyper-planes, elapsed time {denoise_elapsed:.2f} sec") + if not (save_path is None): + for i in range(4): + np.save(os.path.join(save_path, f'X{i}_itr{itr}.npy'), X[i]) + np.save(os.path.join(save_path, f'W{i}_itr{itr}.npy'), W[i]) + Z = sum([beta[k]*(2*X[k]-W[k]) for k in range(image_dim+1)]) for k in range(image_dim+1): W[k] += 2*rho*(Z-X[k]) recon = sum([beta[k]*X[k] for k in range(image_dim+1)]) if verbose: - end = time.time() - elapsed_t = end-start - print(f"Done MACE iteration. 
Elapsed time: {elapsed_t:.2f} sec.") -    #################### end ADMM iterations +        itr_end = time.time() +        itr_elapsed = itr_end-itr_start +        print(f"Done MACE iteration. Elapsed time: {itr_elapsed:.2f} sec.") +    ######################## end ADMM iterations ######################## print("Done MACE reconstruction.") return recon + +def mace4D(sino, angles, dist_source_detector, magnification, +           denoiser, denoiser_args=(), +           max_admm_itr=10, rho=0.5, prior_weight=0.5, +           init_image=None, save_path=None, +           cluster_ticket=None, +           channel_offset=0.0, row_offset=0.0, rotation_offset=0.0, +           delta_pixel_detector=1.0, delta_pixel_image=None, ror_radius=None, +           sigma_y=None, snr_db=30.0, weights=None, weight_type='unweighted', +           positivity=True, p=1.2, q=2.0, T=1.0, num_neighbors=6, +           sharpness=0.0, sigma_x=None, sigma_p=None, max_iterations=3, stop_threshold=0.02, +           num_threads=None, NHICD=False, verbose=1, +           lib_path=__lib_path): +    """Computes 4-D cone-beam reconstruction with the multi-slice MACE algorithm by fusing the forward model proximal map with 2.5-D denoisers across XY-t, XZ-t, and YZ-t hyperplanes. + +    Required arguments: +        - **sino** (*list[ndarray]*): list of 3-D sinogram arrays. The length of sino is equal to num_time_points, where sino[t] is a 3-D array with shape (num_views, num_det_rows, num_det_channels), specifying the sinogram of time point t. +        - **angles** (*list[list]*): List of view angles in radians. The length of angles is equal to num_time_points, where angles[t] is a 1D array specifying the view angles of time point t. +        - **dist_source_detector** (*float*): Distance between the X-ray source and the detector in units of ALU +        - **magnification** (*float*): Magnification of the cone-beam geometry defined as (source to detector distance)/(source to center-of-rotation distance). +    Arguments specific to MACE reconstruction algorithm: +        - **denoiser** (*callable*): The denoiser function used as the prior agent in MACE. + +            ``denoiser(x, *denoiser_args) -> ndarray`` + +            where ``x`` is a 4-D array of the noisy image volume with shape :math:`(N_{batch}, N_t, N_1, N_2)`, and the 2.5-D denoising hyper-plane is defined by :math:`(N_t, N_1, N_2)`. + +            ``denoiser_args`` is a tuple of the fixed parameters needed to completely specify the denoiser function. + +            The denoiser function should return a 4-D array of the denoised image, where the shape of the denoised image volume is the same as the shape of the noisy image volume ``x``. + +            The same ``denoiser`` function is used by all three denoising agents corresponding to the XY-t, YZ-t and XZ-t hyperplanes. For each of the three denoising agents in MACE4D, the input noisy image volume will be permuted before being fed into ``denoiser``, so that after permutation :math:`(N_t, N_1, N_2)` corresponds to the denoising hyper-plane of the agent. + +        - **denoiser_args** (*tuple*): [Default=()] Extra arguments passed to the denoiser function. +        - **max_admm_itr** (*int*): [Default=10] Maximum number of MACE ADMM iterations. +        - **rho** (*float*): [Default=0.5] step size of ADMM update in MACE, range (0,1). The value of ``rho`` mainly controls the convergence speed of MACE algorithm. +        - **prior_weight** (*ndarray*): [Default=0.5] weights for prior agents, specified by either a scalar value or a 1D array. If a scalar is specified, then all three prior agents use the same weight of (prior_weight/3). If an array is provided, then the array should have three elements corresponding to the weight of denoisers in XY-t, YZ-t, and XZ-t hyperplanes respectively.
The weight for the forward model proximal map agent will be calculated as 1-sum(prior_weight) so that the sum of all agent weights is equal to 1. Each entry of prior_weight should have value between 0 and 1. sum(prior_weight) needs to be no greater than 1. +        - **init_image** (*ndarray, optional*): [Default=None] Initial value of MACE reconstruction image, specified by either a scalar value, a 3-D numpy array with shape (num_img_slices,num_img_rows,num_img_cols), or a 4-D numpy array with shape (num_time_points, num_img_slices,num_img_rows,num_img_cols). If None, the initial value of MACE will be automatically determined by a stack of 3-D qGGMRF reconstructions at different time points. +        - **save_path** (*str, optional*): [Default=None] Path to directory that saves the intermediate results of MACE. If not None, the initial qGGMRF reconstruction and input/output images of all agents from each MACE iteration will be saved to ``save_path``. +    Arguments specific to multi-node computation: +        - **cluster_ticket** (*Object*): [Default=None] A ticket used to access a specific cluster, which can be obtained from ``multinode.get_cluster_ticket``. If cluster_ticket is None, the process will run serially. See `dask_jobqueue <https://jobqueue.dask.org/en/latest/api.html>`_ for more information. +    Optional arguments inherited from ``cone3D.recon`` (with changing default value of ``max_iterations``): +        - **channel_offset** (*float, optional*): [Default=0.0] Distance in :math:`ALU` from center of detector to the source-detector line along a row. +        - **row_offset** (*float, optional*): [Default=0.0] Distance in :math:`ALU` from center of detector to the source-detector line along a column. +        - **rotation_offset** (*float, optional*): [Default=0.0] Distance in :math:`ALU` from source-detector line to axis of rotation in the object space. This is normally set to zero. +        - **delta_pixel_detector** (*float, optional*): [Default=1.0] Scalar value of detector pixel spacing in :math:`ALU`. +        - **delta_pixel_image** (*float, optional*): [Default=None] Scalar value of image pixel spacing in :math:`ALU`. If None, automatically set to delta_pixel_detector/magnification +        - **ror_radius** (*float, optional*): [Default=None] Scalar value of radius of reconstruction in :math:`ALU`. If None, automatically set with compute_img_params. Pixels outside the radius ror_radius in the :math:`(x,y)` plane are disregarded in the reconstruction. +        - **sigma_y** (*float, optional*): [Default=None] Scalar value of noise standard deviation parameter. If None, automatically set with auto_sigma_y. +        - **snr_db** (*float, optional*): [Default=30.0] Scalar value that controls assumed signal-to-noise ratio of the data in dB. Ignored if sigma_y is not None. +        - **weights** (*list[ndarray], optional*): [Default=None] List of 3-D weights arrays, with weights[t] having the same shape as sino[t]. +        - **weight_type** (*string, optional*): [Default='unweighted'] Type of noise model used for data. If the ``weights`` array is not supplied, then the function ``cone3D.calc_weights`` is used to set weights using specified ``weight_type`` parameter. + +            - Option "unweighted" corresponds to unweighted reconstruction; +            - Option "transmission" is the correct weighting for transmission CT with constant dosage; +            - Option "transmission_root" is commonly used with transmission CT data to improve image homogeneity; +            - Option "emission" is appropriate for emission CT data.
+        - **positivity** (*bool, optional*): [Default=True] Boolean value that determines if the positivity constraint is enforced. The positivity parameter defaults to True; however, it should be changed to False when used in applications that can generate negative image values. +        - **p** (*float, optional*): [Default=1.2] Scalar value in range :math:`[1,2]` that specifies the qGGMRF shape parameter. +        - **q** (*float, optional*): [Default=2.0] Scalar value in range :math:`[p,2]` that specifies the qGGMRF shape parameter. +        - **T** (*float, optional*): [Default=1.0] Scalar value :math:`>0` that specifies the qGGMRF threshold parameter. +        - **num_neighbors** (*int, optional*): [Default=6] Possible values are {26,18,6}. Number of neighbors in the qGGMRF neighborhood. A higher number of neighbors results in better regularization but a slower reconstruction. +        - **sharpness** (*float, optional*): [Default=0.0] Scalar value that controls the level of sharpness in the reconstruction. ``sharpness=0.0`` is neutral; ``sharpness>0`` increases sharpness; ``sharpness<0`` reduces sharpness. Ignored in qGGMRF reconstruction if ``sigma_x`` is not None, or in proximal map estimation if ``sigma_p`` is not None. +        - **sigma_x** (*ndarray, optional*): [Default=None] Scalar value :math:`>0` that specifies the qGGMRF scale parameter. If None, automatically set with auto_sigma_x for each time point. Regularization should be controlled with the ``sharpness`` parameter, but ``sigma_x`` can be set directly by expert users. +        - **sigma_p** (*float, optional*): [Default=None] Scalar value :math:`>0` that specifies the proximal map parameter. If None, automatically set with auto_sigma_p. Regularization should be controlled with the ``sharpness`` parameter, but ``sigma_p`` can be set directly by expert users. +        - **max_iterations** (*int, optional*): [Default=3] Integer value specifying the maximum number of iterations for proximal map estimation. +        - **stop_threshold** (*float, optional*): [Default=0.02] Scalar valued stopping threshold in percent. If stop_threshold=0.0, then run max iterations. +        - **num_threads** (*int, optional*): [Default=None] Number of compute threads requested when executed. If None, num_threads is set to the number of cores in the system. +        - **NHICD** (*bool, optional*): [Default=False] If true, uses Non-homogeneous ICD updates. +        - **verbose** (*int, optional*): [Default=1] Possible values are {0,1,2}, where 0 is quiet, 1 prints MACE reconstruction progress information, and 2 prints the MACE reconstruction as well as qGGMRF/proximal-map reconstruction and multinode computation information. +        - **lib_path** (*str, optional*): [Default=~/.cache/mbircone] Path to directory containing library of forward projection matrices. + +    Returns: +        4-D numpy array: 4-D reconstruction with shape (num_time_points, num_img_slices, num_img_rows, num_img_cols) in units of :math:`ALU^{-1}`. +    """ + +    if verbose: +        print("initializing MACE...") +    # verbosity level for qGGMRF recon +    qGGMRF_verbose = max(0,verbose-1) + +    # agent weight +    if isinstance(prior_weight, (list, tuple, np.ndarray)): +        assert (len(prior_weight)==3), 'Incorrect dimensionality of prior_weight array.' +        beta = [1-sum(prior_weight)] +        for w in prior_weight: +            beta.append(w) +    else: +        beta = [1.-prior_weight,prior_weight/3.,prior_weight/3.,prior_weight/3.] +    assert(all(w>=0 for w in beta) and (sum(beta)-1.)<1e-5), 'Incorrect value of prior_weight given. All elements in prior_weight should be non-negative, and sum should be no greater than 1.'
+ + # make denoiser_args an instance if necessary + if not isinstance(denoiser_args, tuple): + denoiser_args = (denoiser_args,) + + # get sino shape + (Nt, num_views, num_det_rows, num_det_channels) = np.shape(sino) + # if angles is a 1D array, form the 2D angles array s.t. same set of angles are used at every time point. + if np.ndim(angles) == 1: + angles = [angles for _ in range(Nt)] + # Calculate automatic value of sinogram weights + if weights is None: + weights = cone3D.calc_weights(sino,weight_type) + # Calculate automatic value of delta_pixel_image + if delta_pixel_image is None: + delta_pixel_image = delta_pixel_detector/magnification + # Calculate automatic value of sigma_y + if sigma_y is None: + sigma_y = cone3D.auto_sigma_y(sino, weights, snr_db=snr_db, delta_pixel_image=delta_pixel_image, delta_pixel_detector=delta_pixel_detector) + # Calculate automatic value of sigma_p + if sigma_p is None: + sigma_p = cone3D.auto_sigma_p(sino, delta_pixel_detector=delta_pixel_detector, sharpness=sharpness) + # Fixed args dictionary used for multi-node parallelization + constant_args = {'dist_source_detector':dist_source_detector, 'magnification':magnification, + 'channel_offset':channel_offset, 'row_offset':row_offset, 'rotation_offset':rotation_offset, + 'delta_pixel_detector':delta_pixel_detector, 'delta_pixel_image':delta_pixel_image, 'ror_radius':ror_radius, + 'sigma_y':sigma_y, 'sigma_p':sigma_p, 'sigma_x':sigma_x, + 'positivity':positivity, 'p':p, 'q':q, 'T':T, 'num_neighbors':num_neighbors, + 'max_iterations':20, 'stop_threshold':stop_threshold, + 'num_threads':num_threads, 'NHICD':NHICD, 'verbose':qGGMRF_verbose, 'lib_path':lib_path + } + # List of variable args dictionaries used for multi-node parallelization + variable_args_list = [{'sino': sino[t], 'angles':angles[t], 'weights':weights[t]} + for t in range(Nt)] + # if init_image is not provided, use qGGMRF recon result as init_image. + if init_image is None: + if verbose: + start = time.time() + print("Computing qGGMRF reconstruction at all time points. This will be used as MACE initialization point.") + + if not (cluster_ticket is None): + init_image = np.array(multinode.scatter_gather(cluster_ticket, cone3D.recon, + variable_args_list=variable_args_list, + constant_args=constant_args, + verbose=qGGMRF_verbose)) + else: + init_image = np.array([cone3D.recon(sino[t], angles[t], dist_source_detector, magnification, + channel_offset=channel_offset, row_offset=row_offset, rotation_offset=rotation_offset, + delta_pixel_detector=delta_pixel_detector, delta_pixel_image=delta_pixel_image, ror_radius=ror_radius, + weights=weights[t], sigma_y=sigma_y, sigma_x=sigma_x, + positivity=positivity, p=p, q=q, T=T, num_neighbors=num_neighbors, + max_iterations=20, stop_threshold=stop_threshold, + num_threads=num_threads, NHICD=NHICD, verbose=qGGMRF_verbose, lib_path=lib_path) for t in range(Nt)]) + if verbose: + end = time.time() + elapsed_t = end-start + print(f"Done computing qGGMRF reconstruction. 
Elapsed time: {elapsed_t:.2f} sec.") +        if not (save_path is None): +            print("Save qGGMRF reconstruction to disk.") +            np.save(os.path.join(save_path, 'recon_qGGMRF.npy'), init_image) + +    if np.isscalar(init_image): +        [Nz,Nx,Ny], _ = cone3D.compute_img_size(num_views, num_det_rows, num_det_channels, +                                                dist_source_detector, magnification, +                                                channel_offset=channel_offset, row_offset=row_offset, rotation_offset=rotation_offset, +                                                delta_pixel_detector=delta_pixel_detector, delta_pixel_image=delta_pixel_image, ror_radius=ror_radius) +        init_image = np.zeros((Nt, Nz, Nx, Ny)) + init_image +    elif np.ndim(init_image) == 3: +        init_image = np.array([init_image for _ in range(Nt)]) +    else: +        [_,Nz,Nx,Ny] = np.shape(init_image) + +    # number of agents = image dimensionality. +    W = [np.copy(init_image) for _ in range(4)] +    X = [np.copy(init_image) for _ in range(4)] + +    ######################## begin ADMM iterations ######################## +    if verbose: +        print("Begin MACE ADMM iterations:") +    for itr in range(max_admm_itr): +        if verbose: +            print(f"Begin MACE iteration {itr}/{max_admm_itr}:") +            itr_start = time.time() +        # Modify constant_args and variable_args_list for proximal map estimation. +        constant_args['max_iterations'] = max_iterations +        for t in range(Nt): +            variable_args_list[t]['init_image'] = X[0][t] +            variable_args_list[t]['prox_image'] = W[0][t] +        # forward model prox map agent +        if not (cluster_ticket is None): +            X[0] = np.array(multinode.scatter_gather(cluster_ticket, cone3D.recon, +                                                     variable_args_list=variable_args_list, +                                                     constant_args=constant_args, +                                                     verbose=qGGMRF_verbose)) +        else: +            X[0] = np.array([cone3D.recon(sino[t], angles[t], dist_source_detector, magnification, +                                          channel_offset=channel_offset, row_offset=row_offset, rotation_offset=rotation_offset, +                                          delta_pixel_detector=delta_pixel_detector, delta_pixel_image=delta_pixel_image, ror_radius=ror_radius, +                                          init_image=X[0][t], prox_image=W[0][t], +                                          weights=weights[t], sigma_y=sigma_y, sigma_p=sigma_p, +                                          positivity=positivity, +                                          max_iterations=max_iterations, stop_threshold=stop_threshold, +                                          num_threads=num_threads, NHICD=NHICD, verbose=qGGMRF_verbose, lib_path=lib_path) for t in range(Nt)]) +        if verbose: +            print("Done forward model proximal map estimation.") +        # prior model denoiser agents +        # denoising in XY-t hyperplane (batch along Z-axis) +        denoise_start = time.time() +        X[1] = denoiser_wrapper(W[1], denoiser, denoiser_args, permute_vector=(1,0,2,3), positivity=positivity) # shape after permutation should be (Nz,Nt,Nx,Ny) +        # denoising in YZ-t hyperplane (batch along X-axis) +        X[2] = denoiser_wrapper(W[2], denoiser, denoiser_args, permute_vector=(2,0,1,3), positivity=positivity) # shape after permutation should be (Nx,Nt,Nz,Ny) +        # denoising in XZ-t hyperplane (batch along Y-axis) +        X[3] = denoiser_wrapper(W[3], denoiser, denoiser_args, permute_vector=(3,0,1,2), positivity=positivity) # shape after permutation should be (Ny,Nt,Nz,Nx) +        denoise_end = time.time() +        if verbose: +            denoise_elapsed = denoise_end - denoise_start +            print(f"Done denoising in all hyper-planes, elapsed time {denoise_elapsed:.2f} sec") +        # save X and W as npy files +        if not (save_path is None): +            for i in range(4): +                np.save(os.path.join(save_path, f'X{i}_itr{itr}.npy'), X[i]) +                np.save(os.path.join(save_path, f'W{i}_itr{itr}.npy'), W[i]) + +        Z = sum([beta[k]*(2*X[k]-W[k]) for k in range(4)]) +        for k in range(4): +            W[k] += 2*rho*(Z-X[k]) +        if verbose: +            itr_end = time.time() +            itr_elapsed = itr_end-itr_start +            print(f"Done MACE iteration {itr}/{max_admm_itr}.
Elapsed time: {itr_elapsed:.2f} sec.") +    ######################## end ADMM iterations ######################## +    print("Done MACE reconstruction!") +    recon = sum([beta[k]*X[k] for k in range(4)]) +    return recon diff --git a/mbircone/multinode.py b/mbircone/multinode.py index 55f5641378fffe041b5679d581c365b618048ab3..3363d5b0b7fa5fab6863b1d2f38c1c29e0b35508 100644 --- a/mbircone/multinode.py +++ b/mbircone/multinode.py @@ -224,8 +224,7 @@ def scatter_gather(cluster_ticket, func, constant_args={}, variable_args_list=[] # Print a warning message when the waiting time is too long per 120 seconds. if loop_time % 120 == 0 and loop_time >= 120: -            print('Already waiting nodes for %d seconds. You can cancel the job by Ctrl+C' % (loop_time * wait_times)) - +            print('Already waited for %d seconds. To minimize the wait time, you may cancel the current job by pressing Ctrl+C, and submit a new job with fewer requested nodes or less memory per node.' % (loop_time * wait_times)) if nb_workers >= min_nodes: print('Got %d nodes, start parallel computation.' % nb_workers) break diff --git a/requirements.txt b/requirements.txt index 7b300c7b265f9dcce1ab571c09ad315515996499..a49504c23e123f965d03adc1b7c9a70bcd0510fe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ numpy~=1.19.5 Cython~=0.29.24 psutil~=5.8.0 -Pillow~=8.4.0 +Pillow>=9.1 dask~=2021.11.2 dask-jobqueue~=0.7.3
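Notes on the mace.py changes above, with small illustrative sketches (none of these are part of the patch).

The permute/inverse-permute round trip performed by denoiser_wrapper can be illustrated with an identity denoiser; np.argsort of a permutation gives its inverse, so the output recovers the input shape. This is a sketch only, assuming a 3-D volume and a stand-in denoiser, not the library code itself:

    import numpy as np

    image_noisy = np.random.rand(4, 5, 6)      # (Nz, Nx, Ny)
    permute_vector = (1, 0, 2)                 # batch along the X-axis (YZ planes)

    permuted = np.transpose(image_noisy, permute_vector)           # shape (5, 4, 6)
    denoised = permuted                                            # identity "denoiser" placeholder
    restored = np.transpose(denoised, np.argsort(permute_vector))  # back to (4, 5, 6)
    assert restored.shape == image_noisy.shape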
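The prior_weight-to-agent-weight mapping used by both mace3D and mace4D can be checked in isolation. This is an illustrative sketch of the same logic (with a symmetric tolerance check added for clarity), not a call into the library:

    import numpy as np

    def agent_weights(prior_weight):
        # forward-model agent gets 1 - sum(prior_weight); the three denoising agents share the rest
        if isinstance(prior_weight, (list, tuple, np.ndarray)):
            beta = [1.0 - sum(prior_weight)] + list(prior_weight)
        else:
            beta = [1.0 - prior_weight] + [prior_weight / 3.0] * 3
        assert all(w >= 0 for w in beta) and abs(sum(beta) - 1.0) < 1e-5
        return beta

    print(agent_weights(0.5))              # [0.5, 0.1667, 0.1667, 0.1667] (approximately)
    print(agent_weights([0.2, 0.2, 0.1]))  # [0.5, 0.2, 0.2, 0.1]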
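The consensus step shared by mace3D and mace4D, isolated on toy arrays so the update equations are easy to read (shapes and values here are arbitrary stand-ins for agent outputs):

    import numpy as np

    rho = 0.5
    beta = [0.5, 1/6, 1/6, 1/6]                              # forward-model + three denoising agents
    X = [np.full((2, 2), v) for v in (1.0, 2.0, 3.0, 4.0)]   # stand-ins for agent outputs
    W = [np.copy(x) for x in X]                              # agent inputs

    Z = sum(beta[k] * (2 * X[k] - W[k]) for k in range(4))   # weighted consensus image
    for k in range(4):
        W[k] += 2 * rho * (Z - X[k])                         # relaxed (rho) update of agent inputs
    recon = sum(beta[k] * X[k] for k in range(4))            # weighted average of agent outputs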
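Finally, a minimal sketch of a denoiser callable that satisfies the 4-D contract documented for mace4D (input and output of shape (N_batch, N_t, N_1, N_2)). It substitutes a scipy Gaussian filter for a trained network, and the filter width is a hypothetical parameter passed through denoiser_args:

    import numpy as np
    from scipy.ndimage import gaussian_filter

    def gaussian_denoiser(img_noisy, sigma=1.0):
        # img_noisy has shape (N_batch, N_t, N_1, N_2); smooth over the 2.5-D
        # hyper-plane (N_t, N_1, N_2) and leave the batch axis untouched.
        return gaussian_filter(img_noisy, sigma=(0, sigma, sigma, sigma))

    # e.g. recon = mbircone.mace.mace4D(sino, angles, dist_source_detector, magnification,
    #                                   denoiser=gaussian_denoiser, denoiser_args=(1.5,), ...)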