Un tokeniseur basé sur SentencePiece, destiné aux utilisations en production avec les modèles AI21.

Pour utiliser Jamba 1.5 Mini ou Jamba 1.5 Large, vous devrez demander l'accès au dépôt HuggingFace du modèle concerné.

Installation avec pip :

pip install ai21-tokenizer

ou avec Poetry :

poetry add ai21-tokenizer
from ai21_tokenizer import PreTrainedTokenizers, Tokenizer

# Obtain the pretrained Jamba 1.5 Mini tokenizer through the factory helper.
tokenizer = Tokenizer.get_tokenizer(PreTrainedTokenizers.JAMBA_1_5_MINI_TOKENIZER)
# ... use the tokenizer here
Une autre façon serait d'utiliser directement notre tokenizer Jamba 1.5 Mini :
from ai21_tokenizer import Jamba1_5Tokenizer

# Point this at your local vocabulary file.
model_path = "<Path to your vocabs file>"
tokenizer = Jamba1_5Tokenizer(model_path=model_path)
# ... use the tokenizer here
from ai21_tokenizer import Tokenizer , PreTrainedTokenizers
tokenizer = await Tokenizer . get_async_tokenizer ( PreTrainedTokenizers . JAMBA_1_5_MINI_TOKENIZER )
# Your code here
from ai21_tokenizer import PreTrainedTokenizers, Tokenizer

# Obtain the pretrained Jamba 1.5 Large tokenizer through the factory helper.
tokenizer = Tokenizer.get_tokenizer(PreTrainedTokenizers.JAMBA_1_5_LARGE_TOKENIZER)
# ... use the tokenizer here
Une autre façon serait d'utiliser directement notre tokenizer Jamba 1.5 Large :
from ai21_tokenizer import Jamba1_5Tokenizer

# Point this at your local vocabulary file.
model_path = "<Path to your vocabs file>"
tokenizer = Jamba1_5Tokenizer(model_path=model_path)
# ... use the tokenizer here
from ai21_tokenizer import Tokenizer , PreTrainedTokenizers
tokenizer = await Tokenizer . get_async_tokenizer ( PreTrainedTokenizers . JAMBA_1_5_LARGE_TOKENIZER )
# Your code here
from ai21_tokenizer import PreTrainedTokenizers, Tokenizer

# Obtain the pretrained Jamba Instruct tokenizer through the factory helper.
tokenizer = Tokenizer.get_tokenizer(PreTrainedTokenizers.JAMBA_INSTRUCT_TOKENIZER)
# ... use the tokenizer here
Une autre façon serait d'utiliser directement notre tokenizer Jamba :
from ai21_tokenizer import JambaInstructTokenizer

# Point this at your local vocabulary file.
model_path = "<Path to your vocabs file>"
tokenizer = JambaInstructTokenizer(model_path=model_path)
# ... use the tokenizer here
from ai21_tokenizer import Tokenizer , PreTrainedTokenizers
tokenizer = await Tokenizer . get_async_tokenizer ( PreTrainedTokenizers . JAMBA_INSTRUCT_TOKENIZER )
# Your code here
Une autre façon serait d'utiliser notre méthode de classe asynchrone Jamba tokenizer create :
from ai21_tokenizer import AsyncJambaInstructTokenizer
model_path = "<Path to your vocabs file>"
tokenizer = AsyncJambaInstructTokenizer . create ( model_path = model_path )
# Your code here
from ai21_tokenizer import Tokenizer

# With no argument the factory returns its default tokenizer
# (the Jurassic tokenizer, per the surrounding documentation).
tokenizer = Tokenizer.get_tokenizer()
# ... use the tokenizer here
Une autre façon serait d'utiliser directement notre modèle Jurassic :
from ai21_tokenizer import JurassicTokenizer

model_path = "<Path to your vocabs file. This is usually a binary file that end with .model>"
config = {}  # dictionary loaded from your config.json file
tokenizer = JurassicTokenizer(model_path=model_path, config=config)
from ai21_tokenizer import Tokenizer
tokenizer = await Tokenizer . get_async_tokenizer ()
# Your code here
Une autre façon serait d'utiliser notre méthode de classe asynchrone Jurassic tokenizer create :
from ai21_tokenizer import AsyncJurassicTokenizer
model_path = "<Path to your vocabs file. This is usually a binary file that end with .model>"
config = {} # "dictionary object of your config.json file"
tokenizer = AsyncJurassicTokenizer . create ( model_path = model_path , config = config )
# Your code here
Ces fonctions vous permettent d'encoder votre texte dans une liste d'identifiants de jetons et de revenir au texte brut
# Round-trip: encode a piece of text into token ids, then decode it back.
text_to_encode = "apple orange banana"
encoded_text = tokenizer.encode(text_to_encode)
print(f"Encoded text: {encoded_text}")

decoded_text = tokenizer.decode(encoded_text)
print(f"Decoded text: {decoded_text}")
# Assuming you have created an async tokenizer
text_to_encode = "apple orange banana"
encoded_text = await tokenizer . encode ( text_to_encode )
print ( f"Encoded text: { encoded_text } " )
decoded_text = await tokenizer . decode ( encoded_text )
print ( f"Decoded text: { decoded_text } " )
# Map token ids back to token strings, then the tokens back to ids.
tokens = tokenizer.convert_ids_to_tokens(encoded_text)
print(f"IDs corresponds to Tokens: {tokens}")

ids = tokenizer.convert_tokens_to_ids(tokens)
# Assuming you have created an async tokenizer
tokens = await tokenizer . convert_ids_to_tokens ( encoded_text )
print ( f"IDs corresponds to Tokens: { tokens } " )
ids = tokenizer . convert_tokens_to_ids ( tokens )
Pour plus d’exemples, veuillez consulter notre dossier d’exemples.