Merge pull request #26 from lakabuli/main
lensless imaging items
lakabuli authored Aug 29, 2024
2 parents a8a08a6 + 96fdfe7 commit 3fc6204
Showing 19 changed files with 4,285 additions and 269,337 deletions.
253,894 changes: 0 additions & 253,894 deletions imager_experiments/02_10_2024_make_lenses_3D_edges_and_shear.ipynb

This file was deleted.

291 changes: 0 additions & 291 deletions imager_experiments/9_20_2023_PSF_FOV.ipynb

This file was deleted.

925 changes: 0 additions & 925 deletions imager_experiments/9_25_2023_compute_mutual_information.ipynb

This file was deleted.

7,066 changes: 0 additions & 7,066 deletions imager_experiments/9_25_2023_compute_mutual_information_sweep_noise.ipynb

This file was deleted.

7,156 changes: 0 additions & 7,156 deletions imager_experiments/9_26_2023_compute_mi_photon_bias.ipynb

This file was deleted.


175 changes: 175 additions & 0 deletions lensless_imager/01_17_2024_pixelcnn_cifar10_all_lenses.ipynb
@@ -0,0 +1,175 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "2f0168a5",
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import os\n",
"from jax import config\n",
"config.update(\"jax_enable_x64\", True)\n",
"import sys\n",
"sys.path.insert(0, '/home/lkabuli_waller/workspace/EncodingInformation/')\n",
"sys.path.append('/home/lkabuli_waller/workspace/EncodingInformation/imager_experiments/')\n",
"\n",
"os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" \n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"] = '2'\n",
"from encoding_information.gpu_utils import limit_gpu_memory_growth\n",
"limit_gpu_memory_growth()\n",
"\n",
"# import tensorflow_datasets as tfds # TFDS for MNIST #TODO INSTALL AGAIN LATER\n",
"#import tensorflow as tf # TensorFlow operations\n",
"\n",
"\n",
"\n",
"# from image_distribution_models import PixelCNN\n",
"\n",
"from cleanplots import *\n",
"import jax.numpy as np\n",
"from jax.scipy.special import logsumexp\n",
"import numpy as onp\n",
"\n",
"from leyla_fns import *"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "34552381",
"metadata": {},
"outputs": [],
"source": [
"from encoding_information.image_utils import add_noise, extract_patches\n",
"from encoding_information.models.gaussian_process import StationaryGaussianProcess\n",
"from encoding_information.models.pixel_cnn import PixelCNN\n",
"from encoding_information.information_estimation import estimate_mutual_information"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Sweep Photon Count and Diffusers"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# load the PSFs\n",
"\n",
"one_psf = load_single_lens_uniform(32)\n",
"two_psf = load_two_lens_uniform(32)\n",
"three_psf = load_three_lens_uniform(32)\n",
"four_psf = load_four_lens_uniform(32)\n",
"five_psf = load_five_lens_uniform(32)"
]
},
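{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# (Added sketch) Quick sanity check on the loaded PSFs. This assumes the\n",
"# load_*_lens_uniform helpers from leyla_fns return 2D arrays normalized to\n",
"# sum to ~1, so that convolution preserves the mean photon count.\n",
"for name, psf in zip(['one', 'two', 'three', 'four', 'five'],\n",
"                     [one_psf, two_psf, three_psf, four_psf, five_psf]):\n",
"    print(name, psf.shape, float(psf.sum()))"
]
},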
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# set seed values for reproducibility\n",
"seed_values_full = np.arange(1, 5)\n",
"\n",
"# set photon properties \n",
"bias = 10 # in photons\n",
"mean_photon_count_list = [20, 40, 60, 80, 100, 150, 200, 250, 300]\n",
"\n",
"# set eligible psfs\n",
"\n",
"# psf_patterns = [None, one_psf, four_psf, diffuser_psf]\n",
"# psf_names = ['uc', 'one', 'four', 'diffuser']\n",
"psf_patterns = [one_psf, two_psf, three_psf, four_psf, five_psf]\n",
"psf_names = ['one', 'two', 'three', 'four', 'five']\n",
"# MI estimator parameters \n",
"patch_size = 32\n",
"num_patches = 10000\n",
"bs = 500\n",
"max_epochs = 50"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for photon_count in mean_photon_count_list:\n",
" for index, psf_pattern in enumerate(psf_patterns):\n",
" gaussian_mi_estimates = []\n",
" pixelcnn_mi_estimates = []\n",
" print('Mean photon count: {}, PSF: {}'.format(photon_count, psf_names[index]))\n",
" for seed_value in seed_values_full:\n",
" # load dataset\n",
" (x_train, y_train), (x_test, y_test) = tfk.datasets.cifar10.load_data()\n",
" data = onp.concatenate((x_train, x_test), axis=0) # make one big glob of data\n",
" data = data.astype(np.float32)\n",
" data /= onp.mean(data)\n",
" data *= photon_count # convert to photons with mean photon_count\n",
" labels = np.concatenate((y_train, y_test), axis=0) # make one big glob of labels. \n",
" # for CIFAR 100, need to convert images to grayscale\n",
" if len(data.shape) == 4:\n",
" data = tf.image.rgb_to_grayscale(data).numpy() # convert to grayscale\n",
" data = data.squeeze()\n",
" # make tiled data\n",
" random_data, random_labels = generate_random_tiled_data(data, labels, seed_value)\n",
" \n",
" if psf_pattern is None:\n",
" start_idx = data.shape[-1] // 2\n",
" end_idx = data.shape[-1] // 2 - 1 \n",
" psf_data = random_data[:, start_idx:-end_idx, start_idx:-end_idx]\n",
" else:\n",
" psf_data = convolved_dataset(psf_pattern, random_data)\n",
" # add bias to data \n",
" psf_data += bias\n",
" # make patches and add noise\n",
" psf_data_patch = extract_patches(psf_data, patch_size=patch_size, num_patches=num_patches, seed=seed_value)\n",
" psf_data_shot_patch = add_noise(psf_data_patch, seed=seed_value, batch_size=bs)\n",
" # compute gaussian MI estimate, use comparison clean images\n",
" mi_gaussian_psf = estimate_mutual_information(psf_data_shot_patch, clean_images=psf_data_patch, entropy_model='gaussian',\n",
" max_epochs=max_epochs, verbose=True)\n",
" # compute PixelCNN MI estimate, use comparison clean images\n",
" mi_pixelcnn_psf = estimate_mutual_information(psf_data_shot_patch, clean_images=psf_data_patch, entropy_model='pixelcnn', num_val_samples=1000,\n",
" max_epochs=max_epochs, do_lr_decay=True, verbose=True)\n",
" gaussian_mi_estimates.append(mi_gaussian_psf)\n",
" pixelcnn_mi_estimates.append(mi_pixelcnn_psf)\n",
" #np.save('cifar10_mi_estimates/pixelcnn_mi_estimate_{}_photon_count_{}_psf.npy'.format(photon_count, psf_names[index]), np.array(pixelcnn_mi_estimates))\n",
" #np.save('cifar10_mi_estimates/gaussian_mi_estimate_{}_photon_count_{}_psf.npy'.format(photon_count, psf_names[index]), np.array(gaussian_mi_estimates))\n",
" # save the results once the seeds are done, file includes photon count and psf name\n",
" #np.save('cifar10_mi_estimates/pixelcnn_mi_estimate_{}_photon_count_{}_psf.npy'.format(photon_count, psf_names[index]), np.array(pixelcnn_mi_estimates))\n",
" #np.save('cifar10_mi_estimates/gaussian_mi_estimate_{}_photon_count_{}_psf.npy'.format(photon_count, psf_names[index]), np.array(gaussian_mi_estimates))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "phenotypes",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
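
A minimal follow-up sketch for summarizing the sweep, assuming the commented-out np.save calls in the notebook are enabled so that each (photon count, PSF) pair has an array of per-seed MI estimates on disk (the paths below mirror the notebook's format strings):

import numpy as onp

mean_photon_count_list = [20, 40, 60, 80, 100, 150, 200, 250, 300]
psf_names = ['one', 'two', 'three', 'four', 'five']

for psf_name in psf_names:
    for photon_count in mean_photon_count_list:
        # one PixelCNN MI estimate per random seed, saved by the sweep loop
        path = 'cifar10_mi_estimates/pixelcnn_mi_estimate_{}_photon_count_{}_psf.npy'.format(photon_count, psf_name)
        estimates = onp.load(path)
        print('{} lens(es), {} photons: mean MI {:.3f}, std {:.3f}'.format(psf_name, photon_count, estimates.mean(), estimates.std()))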