diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer.py b/keras_nlp/tokenizers/sentence_piece_tokenizer.py
index e8037fc331..e1ab5aeff6 100644
--- a/keras_nlp/tokenizers/sentence_piece_tokenizer.py
+++ b/keras_nlp/tokenizers/sentence_piece_tokenizer.py
@@ -174,7 +174,7 @@ def set_proto(self, proto):
         )
-        # Keras cannot serialize a bytestring, so we base64 encode the model
-        # byte array as a string for saving.
-        self.proto = base64.b64encode(proto_bytes).decode("ascii")
+        # Keep the caller's original `proto` argument (e.g. a file path or
+        # raw bytes) rather than a re-encoded base64 copy.
+        self.proto = proto

    def vocabulary_size(self) -> int:
        """Get the size of the tokenizer vocabulary."""
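
For context on what this hunk changes, below is a minimal sketch of the two behaviors; the sample bytes and the `vocab/spm.model` path are hypothetical stand-ins, not values from the patch. Keras configs are JSON-serialized and cannot hold raw byte strings, which is why the removed line base64-encoded the model proto; the added line instead preserves whatever the caller passed (for example, a file path), presumably leaving any encoding to the save path handled elsewhere.

```python
import base64

# Hypothetical stand-in for a serialized SentencePiece model proto.
proto_bytes = b"\x0a\x07example-model-bytes"

# Removed behavior: always store a base64 ASCII string, because a
# JSON-based Keras config cannot hold raw bytes.
encoded = base64.b64encode(proto_bytes).decode("ascii")
assert base64.b64decode(encoded) == proto_bytes  # lossless round trip

# Added behavior: store the caller's argument unchanged. If the tokenizer
# was built from a file path, the attribute now records that path instead
# of an opaque base64 blob.
proto = "vocab/spm.model"  # hypothetical path argument
stored = proto  # mirrors `self.proto = proto` in the patch
```

One consequence of this design choice is that the stored attribute is no longer guaranteed to be an ASCII string; it is whatever type the user supplied, so any serialization logic must handle that at save time.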