marta-marta committed
Commit 8ce1c5e · 1 Parent(s): c4ca994

Updating to get the model imports to work

Files changed (1): app.py (+65 -12)
app.py CHANGED
@@ -1,18 +1,17 @@
  import numpy as np
  from scipy import signal
  import math
- # import huggingface_hub  # for loading model
+ import matplotlib.pyplot as plt
  from huggingface_hub import from_pretrained_keras
  import streamlit as st
- # from transformers import AutoModel
- # from transformers import TFAutoModel
- # Needed for importing torch to use in the transformers model
- # import torch
+ # Needed in requirements.txt for importing to use in the transformers model
  import tensorflow
- import matplotlib.pyplot as plt
- # HELLO HUGGING FACE


+ # HELLO HUGGING FACE
+
+ ########################################################################################################################
+ # Define the piecewise functions to create each of the possible shapes
  def basic_box_array(image_size):
      A = np.zeros((int(image_size), int(image_size)))  # Initializes A matrix with 0 values
      # Creates the outside edges of the box
@@ -98,6 +97,8 @@ def add_pixels(array_original, additional_pixels, image_size):
      return A


+ ########################################################################################################################
+ # Create the desired shape using the density and thickness
  def basic_box(additional_pixels, density, image_size):
      A = basic_box_array(image_size)  # Creates the outside edges of the box
      # Increase the thickness of each part of the box
@@ -269,23 +270,75 @@ thickness_2 = st.selectbox("Thickness 2", thickness_options)
  interp_length = st.selectbox("Interpolation Length", interpolation_options)


+ # Define the function to generate unit cells based on user inputs
  def generate_unit_cell(shape, density, thickness):
      return globals()[shape](int(thickness), float(density), 28)

+
+ # Generate the endpoints
+ number_1 = generate_unit_cell(shape_1, density_1, thickness_1)
+ number_2 = generate_unit_cell(shape_2, density_2, thickness_2)
+
+ # Display the endpoints to the user
  if st.button("Generate Endpoint Images"):
      plt.figure(1)
      st.header("Endpoints to be generated:")
-     plt.subplot(1, 2, 1), plt.imshow(generate_unit_cell(shape_1, density_1, thickness_1), cmap='gray', vmin=0, vmax=1)
-     plt.subplot(1, 2, 2), plt.imshow(generate_unit_cell(shape_2, density_2, thickness_2), cmap='gray', vmin=0, vmax=1)
+     plt.subplot(1, 2, 1), plt.imshow(number_1, cmap='gray', vmin=0, vmax=1)
+     plt.subplot(1, 2, 2), plt.imshow(number_2, cmap='gray', vmin=0, vmax=1)
      plt.figure(1)
      st.pyplot(plt.figure(1))
-
+ ########################################################################################################################
  # Load the models from existing huggingface model
  # Load the encoder model
  encoder_model_boxes = from_pretrained_keras("cmudrc/2d-lattice-encoder")
- # encoder_model = TFAutoModel.from_pretrained("cmudrc/2d-lattice-encoder")
+
  # Load the decoder model
  decoder_model_boxes = from_pretrained_keras("cmudrc/2d-lattice-decoder")
- # decoder_model = TFAutoModel.from_pretrained("cmudrc/2d-lattice-decoder")


+ ########################################################################################################################
+ # Encode the Desired Endpoints
+ # resize the array to match the prediction size requirement
+ number_1_expand = np.expand_dims(np.expand_dims(number_1, axis=2), axis=0)
+ number_2_expand = np.expand_dims(np.expand_dims(number_2, axis=2), axis=0)
+
+ # Determine the latent point that will represent our desired number
+ latent_point_1 = encoder_model_boxes.predict(number_1_expand)[0]
+ latent_point_2 = encoder_model_boxes.predict(number_2_expand)[0]
+
+ latent_dimensionality = len(latent_point_1)  # define the dimensionality of the latent space
+ ########################################################################################################################
+ # Establish the Framework for a LINEAR Interpolation
+ number_internal = 8  # the number of interpolations that the model will find between two points
+ num_interp = number_internal + 2  # the number of images to be pictured
+ latent_matrix = []  # This will contain the latent points of the interpolation
+ for column in range(latent_dimensionality):
+     new_column = np.linspace(latent_point_1[column], latent_point_2[column], num_interp)
+     latent_matrix.append(new_column)
+ latent_matrix = np.array(latent_matrix).T  # Transposes the matrix so that each row can be easily indexed
+ ########################################################################################################################
+ # Plotting the Interpolation in 2D Using Chosen Points
+ if st.button("Generate Interpolation:"):
+     plt.figure(2)
+     plot_rows = 2
+     plot_columns = num_interp + 2
+
+     # Plot the First Interpolation Point
+     plt.subplot(plot_rows, plot_columns, 1), plt.imshow(number_1, cmap='gray', vmin=0, vmax=1)
+     # plt.title("First Interpolation Point:\n" + str(box_shape_test[number_1]) + "\nPixel Density: " + str(
+     #     box_density_test[number_1]) + "\nAdditional Pixels: " + str(additional_pixels_test[number_1]))
+
+     predicted_interps = []  # Used to store the predicted interpolations
+     # Interpolate the Images and Print out to User
+     for latent_point in range(2, num_interp + 2):  # cycles the latent points through the decoder model to create images
+         generated_image = decoder_model_boxes.predict(np.array([latent_matrix[latent_point - 2]]))[0]  # generates an interpolated image based on the latent point
+         predicted_interps.append(generated_image[:, :, -1])
+         plt.subplot(plot_rows, plot_columns, latent_point), plt.imshow(generated_image[:, :, -1], cmap='gray', vmin=0, vmax=1)
+         # plt.axis('off')
+
+     # Plot the Second Interpolation Point
+     plt.subplot(plot_rows, plot_columns, num_interp + 2), plt.imshow(number_2, cmap='gray', vmin=0, vmax=1)
+     # plt.title("Second Interpolation Point:\n" + str(box_shape_test[number_2]) + "\nPixel Density: " + str(
+     #     box_density_test[number_2]) + "\nAdditional Pixels: " + str(additional_pixels_test[number_2]))  # + "\nPredicted Latent Point 2: " + str(latent_point_2)
+     plt.figure(2)
+     st.pyplot(plt.figure(2))
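For readers skimming the diff: the new logic is a straight-line interpolation in the latent space of the encoder/decoder pair loaded from the Hub. Below is a minimal sketch of that pipeline outside of Streamlit, assuming the same cmudrc/2d-lattice-encoder and cmudrc/2d-lattice-decoder checkpoints load with from_pretrained_keras as they do in app.py; the random 28x28 endpoint arrays are placeholders for the unit cells the app generates, and the single vectorized np.linspace call stands in for the per-dimension loop in the diff (NumPy broadcasts linspace over array endpoints).

import numpy as np
from huggingface_hub import from_pretrained_keras

encoder = from_pretrained_keras("cmudrc/2d-lattice-encoder")
decoder = from_pretrained_keras("cmudrc/2d-lattice-decoder")

# Placeholder endpoints standing in for the generated 28x28 unit cells
endpoint_1 = np.random.randint(0, 2, (28, 28)).astype(float)
endpoint_2 = np.random.randint(0, 2, (28, 28)).astype(float)

# Add batch and channel axes, mirroring the np.expand_dims calls in app.py
latent_1 = encoder.predict(endpoint_1[np.newaxis, :, :, np.newaxis])[0]
latent_2 = encoder.predict(endpoint_2[np.newaxis, :, :, np.newaxis])[0]

# One evenly spaced latent point per row: shape (num_interp, latent_dimensionality).
# Equivalent to the per-column loop plus transpose in the diff.
num_interp = 10  # 8 internal interpolations plus the two endpoints, as in app.py
latent_matrix = np.linspace(latent_1, latent_2, num_interp)

# Decode each latent point back into an image, as the "Generate Interpolation" button does
frames = [decoder.predict(point[np.newaxis, :])[0][:, :, -1] for point in latent_matrix]

Each entry of frames is one interpolated lattice image; app.py plots these in a row between the two endpoints with matplotlib and displays the figure through st.pyplot.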