mlx-embeddings
1.0.0
MLX-Embeddings is a package for running vision and language embedding models locally on your Mac using MLX.

You can install mlx-embeddings with pip:
pip install mlx-embeddings
To generate an embedding for a single piece of text:
import mlx.core as mx
from mlx_embeddings.utils import load

# Load the model and tokenizer
model, tokenizer = load("sentence-transformers/all-MiniLM-L6-v2")

# Prepare the text
text = "I like reading"

# Tokenize and generate embedding
input_ids = tokenizer.encode(text, return_tensors="mlx")
outputs = model(input_ids)
embeddings = outputs[0][:, 0, :]  # hidden state of the first ([CLS]) token as the sentence embedding
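Here, outputs[0] holds the model's hidden states and [:, 0, :] selects the first ([CLS]) token as the sentence embedding. MLX evaluates lazily, so a quick way to inspect the result is to force evaluation and check the shape; a minimal sketch (the printed shape assumes all-MiniLM-L6-v2, whose embeddings are 384-dimensional):

mx.eval(embeddings)       # force evaluation of the lazy MLX computation
print(embeddings.shape)   # (1, 384) for all-MiniLM-L6-v2
print(embeddings[0][:5])  # first five components of the embedding vector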
You can also use embeddings to compare multiple texts:
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
import seaborn as sns
import mlx.core as mx
from mlx_embeddings.utils import load

# Load the model and tokenizer
model, tokenizer = load("sentence-transformers/all-MiniLM-L6-v2")

def get_embedding(text, model, tokenizer):
    input_ids = tokenizer.encode(text, return_tensors="mlx", padding=True, truncation=True, max_length=512)
    outputs = model(input_ids)
    embeddings = outputs[0][:, 0, :][0]  # [CLS] embedding of the single input sequence
    return embeddings
# Sample texts
texts = [
    "I like grapes",
    "I like fruits",
    "The slow green turtle crawls under the busy ant."
]
# Generate embeddings
embeddings = [get_embedding(text, model, tokenizer) for text in texts]

# Compute similarity
similarity_matrix = cosine_similarity(embeddings)

# Visualize results
def plot_similarity_matrix(similarity_matrix, labels):
    plt.figure(figsize=(5, 4))
    sns.heatmap(similarity_matrix, annot=True, cmap='coolwarm', xticklabels=labels, yticklabels=labels)
    plt.title('Similarity Matrix Heatmap')
    plt.tight_layout()
    plt.show()

labels = [f"Text {i+1}" for i in range(len(texts))]
plot_similarity_matrix(similarity_matrix, labels)
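Depending on your NumPy and MLX versions, scikit-learn may not accept a list of MLX arrays directly. If cosine_similarity rejects the input, one workaround (an assumption on my part, not part of the mlx-embeddings API) is to convert to NumPy first:

import numpy as np

# Convert each MLX array to NumPy before handing the batch to scikit-learn
embeddings_np = np.stack([np.array(e) for e in embeddings])
similarity_matrix = cosine_similarity(embeddings_np)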
To process multiple texts at once:
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
import seaborn as sns
import mlx.core as mx
from mlx_embeddings.utils import load

# Load the model and tokenizer
model, tokenizer = load("sentence-transformers/all-MiniLM-L6-v2")

def get_embedding(texts, model, tokenizer):
    inputs = tokenizer.batch_encode_plus(texts, return_tensors="mlx", padding=True, truncation=True, max_length=512)
    outputs = model(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"]
    )
    return outputs[0]
def compute_and_print_similarity(embeddings):
    B, seq_len, dim = embeddings.shape
    embeddings_2d = embeddings.reshape(B, -1)
    similarity_matrix = cosine_similarity(embeddings_2d)

    print("Similarity matrix between sequences:")
    print(similarity_matrix)
    print("\n")

    for i in range(B):
        for j in range(i + 1, B):
            print(f"Similarity between sequence {i+1} and sequence {j+1}: {similarity_matrix[i][j]:.4f}")

    return similarity_matrix
# Sample texts
texts = [
    "I like grapes",
    "I like fruits",
    "The slow green turtle crawls under the busy ant."
]
embeddings = get_embedding(texts, model, tokenizer)
similarity_matrix = compute_and_print_similarity(embeddings)

# Visualize results (plot_similarity_matrix is defined in the previous example)
labels = [f"Text {i+1}" for i in range(len(texts))]
plot_similarity_matrix(similarity_matrix, labels)
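One caveat: flattening the padded hidden states to shape (B, seq_len * dim) lets padding positions influence the similarity scores. A common alternative is mean pooling weighted by the attention mask. The sketch below assumes outputs[0] is the full hidden-state tensor of shape (B, seq_len, dim); masked_mean_pool is an illustrative helper, not part of the mlx-embeddings API:

def masked_mean_pool(hidden_states, attention_mask):
    # hidden_states: (B, seq_len, dim); attention_mask: (B, seq_len), 1 for real tokens
    mask = mx.expand_dims(attention_mask, axis=-1).astype(hidden_states.dtype)
    summed = mx.sum(hidden_states * mask, axis=1)   # sum over non-padding positions
    counts = mx.maximum(mx.sum(mask, axis=1), 1)    # guard against division by zero
    return summed / counts                          # (B, dim)

inputs = tokenizer.batch_encode_plus(texts, return_tensors="mlx", padding=True, truncation=True, max_length=512)
hidden_states = model(inputs["input_ids"], attention_mask=inputs["attention_mask"])[0]
pooled = masked_mean_pool(hidden_states, inputs["attention_mask"])
similarity_matrix = cosine_similarity(pooled)       # (B, B) similarities over pooled vectors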
MLX-Embeddings supports a range of model architectures for text embedding tasks.

We are continually working to expand support for additional model architectures. Check our GitHub repository or documentation for the most up-to-date list of supported models and their specific versions.
Contributions to MLX-Embeddings are welcome! Please see our contributing guidelines for more information.

This project is licensed under the GNU General Public License v3.

For any questions or problems, please open an issue on the GitHub repository.