# mnn-llm
# example model: llama3-8b-instruct-mnn
# clone the repository (submodules are required — MNN is vendored as a submodule)
git clone --recurse-submodules https://github.com/wangzhaode/mnn-llm.git
cd mnn-llm
# linux build
./script/build.sh
# macos build (same script as linux)
./script/build.sh
# windows build (MSVC, run from PowerShell)
./script/build.ps1
# build the python wheel
./script/py_build.sh
# android native libraries + cli binary
./script/android_build.sh
# android apk
./script/android_app_build.sh
# ios build
./script/ios_build.sh
# 一些编译宏:
#   BUILD_FOR_ANDROID: 编译到Android设备
#   LLM_SUPPORT_VISION: 是否支持视觉处理能力
#   DUMP_PROFILE_INFO: 每次对话后dump出性能数据到命令行中
# 默认使用CPU;如果使用其他后端或能力, 可以在编译MNN时添加MNN编译宏:
#   -DMNN_CUDA=ON
#   -DMNN_OPENCL=ON
#   -DMNN_METAL=ON
# linux/macos: run from the build directory
./cli_demo ./Qwen2-1.5B-Instruct-MNN/config.json # cli demo
./web_demo ./Qwen2-1.5B-Instruct-MNN/config.json ../web # web ui demo
# windows: MSVC places binaries in the Debug\ subfolder; path separators
# were mangled in the original (".Debugcli_demo.exe") — restored here
.\Debug\cli_demo.exe ./Qwen2-1.5B-Instruct-MNN/config.json
.\Debug\web_demo.exe ./Qwen2-1.5B-Instruct-MNN/config.json ../web
# android: push the native libraries, the demo binary and the model
# to the device, then run the cli demo on-device via adb shell.
# core MNN runtime library
adb push android_build/MNN/OFF/arm64-v8a/libMNN.so /data/local/tmp
# MNN express (eager execution) library
adb push android_build/MNN/express/OFF/arm64-v8a/libMNN_Express.so /data/local/tmp
# llm library and the cli demo binary
adb push android_build/libllm.so android_build/cli_demo /data/local/tmp
# the exported model directory (weights + config.json)
adb push Qwen2-1.5B-Instruct-MNN /data/local/tmp
# LD_LIBRARY_PATH=. so the pushed .so files are found next to the binary
adb shell "cd /data/local/tmp && export LD_LIBRARY_PATH=. && ./cli_demo ./Qwen2-1.5B-Instruct-MNN/config.json"