This repository provides a set of ROS 2 packages for integrating llama.cpp into ROS 2, so you can incorporate its functionality into your ROS 2 projects. You can also use llama.cpp features such as GBNF grammars and modify LoRAs in real time.
To run llama_ros with CUDA, you must first install the CUDA Toolkit. Then you can compile llama_ros with --cmake-args -DGGML_CUDA=ON to enable CUDA support.
cd ~/ros2_ws/src
git clone https://github.com/mgonzs13/llama_ros.git
pip3 install -r llama_ros/requirements.txt
cd ~/ros2_ws
rosdep install --from-paths src --ignore-src -r -y
colcon build --cmake-args -DGGML_CUDA=ON # add this for CUDA
Build the llama_ros Docker image or download an image from DockerHub. You can choose whether to build llama_ros with CUDA (USE_CUDA) and select the CUDA version (CUDA_VERSION). Remember that you must use DOCKER_BUILDKIT=0 when building the image to compile llama_ros with CUDA.
DOCKER_BUILDKIT=0 docker build -t llama_ros --build-arg USE_CUDA=1 --build-arg CUDA_VERSION=12-6 .
Run the Docker container. If you want to use CUDA, you must install the NVIDIA Container Toolkit and add --gpus all.
docker run -it --rm --gpus all llama_ros
llama_ros includes commands that speed up testing GGUF-based LLMs within the ROS 2 ecosystem. The following commands are integrated into the ROS 2 command set:
Use this command to launch an LLM from a YAML file. The YAML configuration is used to launch the LLM in the same way as a regular launch file. Here is an example of how to use it:
ros2 llama launch ~/ros2_ws/src/llama_ros/llama_bringup/models/StableLM-Zephyr.yaml
Use this command to send a prompt to a launched LLM. The command takes a string, which is the prompt, and has the following arguments:
- -r, --reset: whether to reset the LLM before prompting
- -t, --temp: the temperature value
- --image-url: an image URL to send to a VLM
Here is an example of how to use it:
ros2 llama prompt "Do you know ROS 2?" -t 0.0
First of all, you need to create a launch file to use llama_ros or llava_ros. This launch file contains the main parameters to download the model from HuggingFace and configure it. Take a look at the following examples and the predefined launch files.
from launch import LaunchDescription
from llama_bringup.utils import create_llama_launch


def generate_launch_description():
    return LaunchDescription([
        create_llama_launch(
            n_ctx=2048,  # context of the LLM in tokens
            n_batch=8,  # batch size in tokens
            n_gpu_layers=0,  # layers to load in GPU
            n_threads=1,  # threads
            n_predict=2048,  # max tokens, -1 == inf
            model_repo="TheBloke/Marcoroni-7B-v3-GGUF",  # Hugging Face repo
            model_filename="marcoroni-7b-v3.Q4_K_M.gguf",  # model file in repo
            system_prompt_type="Alpaca"  # system prompt type
        )
    ])
ros2 launch llama_bringup marcoroni.launch.py
n_ctx: 2048  # context of the LLM in tokens
n_batch: 8  # batch size in tokens
n_gpu_layers: 0  # layers to load in GPU
n_threads: 1  # threads
n_predict: 2048  # max tokens, -1 == inf
model_repo: "cstr/Spaetzle-v60-7b-GGUF"  # Hugging Face repo
model_filename: "Spaetzle-v60-7b-q4-k-m.gguf"  # model file in repo
system_prompt_type: "Alpaca"  # system prompt type
import os
from launch import LaunchDescription
from llama_bringup.utils import create_llama_launch_from_yaml
from ament_index_python.packages import get_package_share_directory


def generate_launch_description():
    return LaunchDescription([
        create_llama_launch_from_yaml(os.path.join(
            get_package_share_directory("llama_bringup"), "models", "Spaetzle.yaml"))
    ])
ros2 launch llama_bringup spaetzle.launch.py
n_ctx: 2048  # context of the LLM in tokens
n_batch: 8  # batch size in tokens
n_gpu_layers: 0  # layers to load in GPU
n_threads: 1  # threads
n_predict: 2048  # max tokens, -1 == inf
model_repo: "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF"  # Hugging Face repo
model_filename: "qwen2.5-coder-7b-instruct-q4_k_m-00001-of-00002.gguf"  # model shard file in repo
system_prompt_type: "ChatML"  # system prompt type
ros2 llama launch Qwen2.yaml
from launch import LaunchDescription
from llama_bringup.utils import create_llama_launch


def generate_launch_description():
    return LaunchDescription([
        create_llama_launch(
            use_llava=True,  # enable llava
            n_ctx=8192,  # context of the LLM in tokens, use a huge context size to load images
            n_batch=512,  # batch size in tokens
            n_gpu_layers=33,  # layers to load in GPU
            n_threads=1,  # threads
            n_predict=8192,  # max tokens, -1 == inf
            model_repo="cjpais/llava-1.6-mistral-7b-gguf",  # Hugging Face repo
            model_filename="llava-v1.6-mistral-7b.Q4_K_M.gguf",  # model file in repo
            mmproj_repo="cjpais/llava-1.6-mistral-7b-gguf",  # Hugging Face repo
            mmproj_filename="mmproj-model-f16.gguf",  # mmproj file in repo
            system_prompt_type="Mistral"  # system prompt type
        )
    ])
ros2 launch llama_bringup llava.launch.py
use_llava: True  # enable llava
n_ctx: 8192  # context of the LLM in tokens, use a huge context size to load images
n_batch: 512  # batch size in tokens
n_gpu_layers: 33  # layers to load in GPU
n_threads: 1  # threads
n_predict: 8192  # max tokens, -1 == inf
model_repo: "cjpais/llava-1.6-mistral-7b-gguf"  # Hugging Face repo
model_filename: "llava-v1.6-mistral-7b.Q4_K_M.gguf"  # model file in repo
mmproj_repo: "cjpais/llava-1.6-mistral-7b-gguf"  # Hugging Face repo
mmproj_filename: "mmproj-model-f16.gguf"  # mmproj file in repo
system_prompt_type: "mistral"  # system prompt type
import os
from launch import LaunchDescription
from llama_bringup.utils import create_llama_launch_from_yaml
from ament_index_python.packages import get_package_share_directory


def generate_launch_description():
    return LaunchDescription([
        create_llama_launch_from_yaml(os.path.join(
            get_package_share_directory("llama_bringup"),
            "models", "llava-1.6-mistral-7b-gguf.yaml"))
    ])
ros2 launch llama_bringup llava.launch.py
You can use LoRA adapters when launching LLMs. Using the llama.cpp features, you can load multiple adapters and choose the scale to apply to each one. Here is an example of using LoRA adapters with Phi-3. You can list the loaded LoRAs with the /llama/list_loras service and modify their scale values with the /llama/update_loras service. A scale value of 0.0 means that LoRA is not used; a sketch of calling these services is shown after the YAML example below.
n_ctx: 2048
n_batch: 8
n_gpu_layers: 0
n_threads: 1
n_predict: 2048
model_repo: "bartowski/Phi-3.5-mini-instruct-GGUF"
model_filename: "Phi-3.5-mini-instruct-Q4_K_M.gguf"
lora_adapters:
  - repo: "zhhan/adapter-Phi-3-mini-4k-instruct_code_writing"
    filename: "Phi-3-mini-4k-instruct-adaptor-f16-code_writer.gguf"
    scale: 0.5
  - repo: "zhhan/adapter-Phi-3-mini-4k-instruct_summarization"
    filename: "Phi-3-mini-4k-instruct-adaptor-f16-summarization.gguf"
    scale: 0.5
system_prompt_type: "Phi-3"
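For reference, the snippet below is a minimal sketch of calling the LoRA services from a ROS 2 node. The service types (ListLoRAs, UpdateLoRAs) and the fields of the LoRA message (id, scale) are assumptions about llama_msgs; check the installed interface definitions for the exact names.

import rclpy
from rclpy.node import Node
from llama_msgs.srv import ListLoRAs, UpdateLoRAs  # assumed service types
from llama_msgs.msg import LoRA  # assumed message type


class LoRAClientNode(Node):

    def __init__(self) -> None:
        super().__init__("lora_client_node")

        # create clients for the LoRA services
        self.list_client = self.create_client(ListLoRAs, "/llama/list_loras")
        self.update_client = self.create_client(UpdateLoRAs, "/llama/update_loras")

        # list the LoRAs currently loaded
        self.list_client.wait_for_service()
        loras = self.list_client.call(ListLoRAs.Request()).loras

        # disable the first LoRA (if any) by setting its scale to 0.0
        if loras:
            lora = LoRA()
            lora.id = loras[0].id
            lora.scale = 0.0

            update_req = UpdateLoRAs.Request()
            update_req.loras.append(lora)

            self.update_client.wait_for_service()
            self.update_client.call(update_req)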
Both llama_ros and llava_ros provide ROS 2 interfaces to access the main functionalities of the models. Here are some examples of how to use them inside ROS 2 nodes. Moreover, take a look at the llama_demo_node.py and llava_demo_node.py demos.
from rclpy.node import Node
from llama_msgs.srv import Tokenize


class ExampleNode(Node):

    def __init__(self) -> None:
        super().__init__("example_node")

        # create the client
        self.srv_client = self.create_client(Tokenize, "/llama/tokenize")

        # create the request
        req = Tokenize.Request()
        req.text = "Example text"

        # call the tokenize service
        self.srv_client.wait_for_service()
        tokens = self.srv_client.call(req).tokens
from rclpy.node import Node
from llama_msgs.srv import Detokenize


class ExampleNode(Node):

    def __init__(self) -> None:
        super().__init__("example_node")

        # create the client
        self.srv_client = self.create_client(Detokenize, "/llama/detokenize")

        # create the request
        req = Detokenize.Request()
        req.tokens = [123, 123]

        # call the detokenize service
        self.srv_client.wait_for_service()
        text = self.srv_client.call(req).text
Remember to launch llama_ros with embeddings enabled (embedding set to true) to be able to generate embeddings with your LLM.
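As a reference, a launch sketch with embeddings enabled might look like the following; the embedding parameter name is an assumption here, so check the llama_bringup parameters for the exact name.

from launch import LaunchDescription
from llama_bringup.utils import create_llama_launch


def generate_launch_description():
    return LaunchDescription([
        create_llama_launch(
            embedding=True,  # assumed parameter name for enabling embeddings
            n_ctx=2048,  # context of the LLM in tokens
            model_repo="cstr/Spaetzle-v60-7b-GGUF",  # Hugging Face repo
            model_filename="Spaetzle-v60-7b-q4-k-m.gguf",  # model file in repo
            system_prompt_type="Alpaca"  # system prompt type
        )
    ])

Once the model is launched with embeddings enabled, the /llama/generate_embeddings service can be called as in the following node example.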
from rclpy.node import Node
from llama_msgs.srv import Embeddings


class ExampleNode(Node):

    def __init__(self) -> None:
        super().__init__("example_node")

        # create the client
        self.srv_client = self.create_client(Embeddings, "/llama/generate_embeddings")

        # create the request
        req = Embeddings.Request()
        req.prompt = "Example text"
        req.normalize = True

        # call the embedding service
        self.srv_client.wait_for_service()
        embeddings = self.srv_client.call(req).embeddings
import rclpy
from rclpy.node import Node
from rclpy.action import ActionClient
from llama_msgs.action import GenerateResponse


class ExampleNode(Node):

    def __init__(self) -> None:
        super().__init__("example_node")

        # create the client
        self.action_client = ActionClient(
            self, GenerateResponse, "/llama/generate_response")

        # create the goal and set the sampling config
        goal = GenerateResponse.Goal()
        goal.prompt = self.prompt
        goal.sampling_config.temp = 0.2

        # wait for the server and send the goal
        self.action_client.wait_for_server()
        send_goal_future = self.action_client.send_goal_async(goal)

        # wait for the server
        rclpy.spin_until_future_complete(self, send_goal_future)
        get_result_future = send_goal_future.result().get_result_async()

        # wait again and take the result
        rclpy.spin_until_future_complete(self, get_result_future)
        result: GenerateResponse.Result = get_result_future.result().result
import cv2
from cv_bridge import CvBridge

import rclpy
from rclpy.node import Node
from rclpy.action import ActionClient
from llama_msgs.action import GenerateResponse


class ExampleNode(Node):

    def __init__(self) -> None:
        super().__init__("example_node")

        # create a cv bridge for the image
        self.cv_bridge = CvBridge()

        # create the client
        self.action_client = ActionClient(
            self, GenerateResponse, "/llama/generate_response")

        # create the goal and set the sampling config
        goal = GenerateResponse.Goal()
        goal.prompt = self.prompt
        goal.sampling_config.temp = 0.2

        # add your image to the goal
        image = cv2.imread("/path/to/your/image", cv2.IMREAD_COLOR)
        goal.image = self.cv_bridge.cv2_to_imgmsg(image)

        # wait for the server and send the goal
        self.action_client.wait_for_server()
        send_goal_future = self.action_client.send_goal_async(goal)

        # wait for the server
        rclpy.spin_until_future_complete(self, send_goal_future)
        get_result_future = send_goal_future.result().get_result_async()

        # wait again and take the result
        rclpy.spin_until_future_complete(self, get_result_future)
        result: GenerateResponse.Result = get_result_future.result().result
There is a llama_ros integration for LangChain, so prompt engineering techniques can be applied. Here are some examples of how to use it.
import rclpy

from llama_ros.langchain import LlamaROS
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

rclpy.init()

# create the llama_ros llm for langchain
llm = LlamaROS()

# create a prompt template
prompt_template = "tell me a joke about {topic}"
prompt = PromptTemplate(
    input_variables=["topic"],
    template=prompt_template
)

# create a chain with the llm and the prompt template
chain = prompt | llm | StrOutputParser()

# run the chain
text = chain.invoke({"topic": "bears"})
print(text)

rclpy.shutdown()
import rclpy

from llama_ros.langchain import LlamaROS
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

rclpy.init()

# create the llama_ros llm for langchain
llm = LlamaROS()

# create a prompt template
prompt_template = "tell me a joke about {topic}"
prompt = PromptTemplate(
    input_variables=["topic"],
    template=prompt_template
)

# create a chain with the llm and the prompt template
chain = prompt | llm | StrOutputParser()

# run the chain and stream the tokens
for c in chain.stream({"topic": "bears"}):
    print(c, flush=True, end="")

rclpy.shutdown()
import rclpy

from llama_ros.langchain import LlamaROS

rclpy.init()

# create the llama_ros llm for langchain
llm = LlamaROS()

# bind the image_url
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
llm = llm.bind(image_url=image_url).stream("Describe the image")

# run the llm
for c in llm:
    print(c, flush=True, end="")

rclpy.shutdown()
import rclpy

from langchain_chroma import Chroma
from llama_ros.langchain import LlamaROSEmbeddings

rclpy.init()

# create the llama_ros embeddings for langchain
embeddings = LlamaROSEmbeddings()

# create a vector database and assign it
db = Chroma(embedding_function=embeddings)

# create the retriever
retriever = db.as_retriever(search_kwargs={"k": 5})

# add your texts
db.add_texts(texts=["your_texts"])

# retrieve documents
documents = retriever.invoke("your_query")
print(documents)

rclpy.shutdown()
import rclpy

from llama_ros.langchain import LlamaROSReranker
from llama_ros.langchain import LlamaROSEmbeddings

from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.retrievers import ContextualCompressionRetriever

rclpy.init()

# load the documents
documents = TextLoader("../state_of_the_union.txt").load()
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)

# create the llama_ros embeddings
embeddings = LlamaROSEmbeddings()

# create the VD and the retriever
retriever = FAISS.from_documents(
    texts, embeddings).as_retriever(search_kwargs={"k": 20})

# create the compressor using the llama_ros reranker
compressor = LlamaROSReranker()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)

# retrieve the documents
compressed_docs = compression_retriever.invoke(
    "What did the president say about Ketanji Jackson Brown"
)

for doc in compressed_docs:
    print("-" * 50)
    print(doc.page_content)
    print("\n")

rclpy.shutdown()