diff --git a/examples/vision/mlp_image_classification.py b/examples/vision/mlp_image_classification.py
index ab02dad38d..81d513cc74 100644
--- a/examples/vision/mlp_image_classification.py
+++ b/examples/vision/mlp_image_classification.py
@@ -281,7 +281,7 @@ def call(self, inputs):
         x_channels = keras.ops.transpose(x, axes=(0, 2, 1))
         # Apply mlp1 on each channel independently.
         mlp1_outputs = self.mlp1(x_channels)
-        # Transpose mlp1_outputs from [num_batches, hidden_dim, num_patches] to [num_batches, num_patches, hidden_units].
+        # Transpose mlp1_outputs from [num_batches, hidden_units, num_patches] to [num_batches, num_patches, hidden_units].
         mlp1_outputs = keras.ops.transpose(mlp1_outputs, axes=(0, 2, 1))
         # Add skip connection.
         x = mlp1_outputs + inputs
diff --git a/examples/vision/perceiver_image_classification.py b/examples/vision/perceiver_image_classification.py
index c55ef45a0c..95428396d9 100644
--- a/examples/vision/perceiver_image_classification.py
+++ b/examples/vision/perceiver_image_classification.py
@@ -134,7 +134,7 @@ def __init__(self, patch_size):
     def call(self, images):
         batch_size = ops.shape(images)[0]
         patches = ops.image.extract_patches(
-            image=images,
+            images=images,
             size=(self.patch_size, self.patch_size),
             strides=(self.patch_size, self.patch_size),
            dilation_rate=1,
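
Not part of the patch itself: a minimal standalone sketch (assuming Keras 3; the shapes and patch_size below are hypothetical, chosen only for illustration) showing the two corrected spots, the axes=(0, 2, 1) transpose described by the fixed comment and the images= keyword of keras.ops.image.extract_patches.

    import numpy as np
    from keras import ops

    # First hunk: transposing with axes=(0, 2, 1) maps
    # [num_batches, hidden_units, num_patches] -> [num_batches, num_patches, hidden_units].
    mlp1_outputs = np.zeros((4, 64, 16), dtype="float32")  # hypothetical shapes
    print(ops.shape(ops.transpose(mlp1_outputs, axes=(0, 2, 1))))  # (4, 16, 64)

    # Second hunk: extract_patches takes the `images=` keyword, not `image=`.
    patch_size = 2  # hypothetical value
    images = np.random.rand(1, 8, 8, 3).astype("float32")
    patches = ops.image.extract_patches(
        images=images,
        size=(patch_size, patch_size),
        strides=(patch_size, patch_size),
        dilation_rate=1,
        padding="valid",
    )
    # Each 2x2 patch of a 3-channel image flattens to 2 * 2 * 3 = 12 values.
    print(ops.shape(patches))  # (1, 4, 4, 12)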