gotch
gotch creates a thin wrapper around the Pytorch C++ API (Libtorch) to take advantage of its already-optimized C++ tensor API (~3039 functions) and its dynamic graph computation with CUDA support, and provides an idiomatic Go API for developing and implementing deep learning in Go.
Some features are still in progress: gotch is in active development mode and breaking API changes may occur. Feel free to open pull requests, report issues, or discuss any concerns at any time. All contributions are welcome.
The current version of gotch is v0.9.1. The default CUDA version is 11.8 if CUDA is available; otherwise the CPU version is used. The corresponding Libtorch C++ version is 2.1.0. Note: libtorch will be installed at /usr/local/lib.

CPU:
wget https://github.com/sugarme/gotch/releases/download/v0.9.0/setup-libtorch.sh
chmod +x setup-libtorch.sh
export CUDA_VER=cpu && bash setup-libtorch.sh
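When the script finishes, libtorch should be present under /usr/local/lib. One quick way to check (plain shell, not part of the gotch tooling):

ls /usr/local/lib/libtorch/lib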
Update environment: on Debian/Ubuntu, add or update the following lines in the .bashrc file.
export GOTCH_LIBTORCH="/usr/local/lib/libtorch"
export LIBRARY_PATH="$LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
export CPATH="$CPATH:$GOTCH_LIBTORCH/lib:$GOTCH_LIBTORCH/include:$GOTCH_LIBTORCH/include/torch/csrc/api/include"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
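After editing .bashrc (the same applies to the GPU setup below), reload the shell configuration so the variables take effect in the current session. This is standard shell usage, not a gotch-specific step:

source "$HOME/.bashrc"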
wget https://github.com/sugarme/gotch/releases/download/v0.9.0/setup-gotch.sh
chmod +x setup-gotch.sh
export CUDA_VER=cpu && export GOTCH_VER=v0.9.1 && bash setup-gotch.sh
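Once setup-gotch.sh finishes, a quick way to confirm that cgo can find libtorch is to build and run a tiny program. This is a minimal sketch using only calls shown later in this README; the file name and package layout are arbitrary.

package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// If this builds and prints a 2x3 tensor of ones, libtorch and gotch
	// are installed and linked correctly.
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	defer x.MustDrop()

	fmt.Printf("%v\n", x)
}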
GPU:
Note: make sure CUDA is working on your machine:
nvidia-smi
wget https://github.com/sugarme/gotch/releases/download/v0.9.0/setup-libtorch.sh
chmod +x setup-libtorch.sh
export CUDA_VER=11.8 && bash setup-libtorch.sh
Update environment: on Debian/Ubuntu, add or update the following lines in the .bashrc file.
export GOTCH_LIBTORCH="/usr/local/lib/libtorch"
export LIBRARY_PATH="$LIBRARY_PATH:$GOTCH_LIBTORCH/lib"
export CPATH="$CPATH:$GOTCH_LIBTORCH/lib:$GOTCH_LIBTORCH/include:$GOTCH_LIBTORCH/include/torch/csrc/api/include"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GOTCH_LIBTORCH/lib:/usr/lib64-nvidia:/usr/local/cuda-${CUDA_VERSION}/lib64"
wget https://github.com/sugarme/gotch/releases/download/v0.9.0/setup-gotch.sh
chmod +x setup-gotch.sh
export CUDA_VER=11.8 && export GOTCH_VER=v0.9.1 && bash setup-gotch.sh
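With the GPU setup in place, code can pick a device at run time. The sketch below assumes a gotch.CudaIfAvailable() helper (the device-selection pattern used in the gotch examples); if the name differs in your version, substitute the equivalent device constructor.

package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// Assumed helper: returns a CUDA device when one is visible,
	// otherwise falls back to CPU.
	device := gotch.CudaIfAvailable()

	x := ts.MustRand([]int64{2, 2}, gotch.Float, device)
	defer x.MustDrop()

	// The %i verb prints tensor info: shape, dtype and device.
	fmt.Printf("%i", x)
}

The examples below then exercise the Go API itself, starting with basic tensor operations.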
import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/ts"
)

func basicOps() {
	xs := ts.MustRand([]int64{3, 5, 6}, gotch.Float, gotch.CPU)
	fmt.Printf("%8.3f\n", xs)
	fmt.Printf("%i", xs)
/*
(1,.,.) =
0.391 0.055 0.638 0.514 0.757 0.446
0.817 0.075 0.437 0.452 0.077 0.492
0.504 0.945 0.863 0.243 0.254 0.640
0.850 0.132 0.763 0.572 0.216 0.116
0.410 0.660 0.156 0.336 0.885 0.391
(2,.,.) =
0.952 0.731 0.380 0.390 0.374 0.001
0.455 0.142 0.088 0.039 0.862 0.939
0.621 0.198 0.728 0.914 0.168 0.057
0.655 0.231 0.680 0.069 0.803 0.243
0.853 0.729 0.983 0.534 0.749 0.624
(3,.,.) =
0.734 0.447 0.914 0.956 0.269 0.000
0.427 0.034 0.477 0.535 0.440 0.972
0.407 0.945 0.099 0.184 0.778 0.058
0.482 0.996 0.085 0.605 0.282 0.671
0.887 0.029 0.005 0.216 0.354 0.262
TENSOR INFO:
Shape: [3 5 6]
DType: float32
Device: {CPU 1}
Defined: true
*/
	// Basic tensor operations
	ts1 := ts.MustArange(ts.IntScalar(6), gotch.Int64, gotch.CPU).MustView([]int64{2, 3}, true)
	defer ts1.MustDrop()
	ts2 := ts.MustOnes([]int64{3, 4}, gotch.Int64, gotch.CPU)
	defer ts2.MustDrop()

	mul := ts1.MustMatmul(ts2, false)
	defer mul.MustDrop()

	fmt.Printf("ts1:\n%2d", ts1)
	fmt.Printf("ts2:\n%2d", ts2)
	fmt.Printf("mul tensor (ts1 x ts2):\n%2d", mul)
/*
ts1:
0 1 2
3 4 5
ts2:
1 1 1 1
1 1 1 1
1 1 1 1
mul tensor (ts1 x ts2):
3 3 3 3
12 12 12 12
*/
	// In-place operation
	ts3 := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	fmt.Printf("Before:\n%v", ts3)
	ts3.MustAddScalar_(ts.FloatScalar(2.0))
	fmt.Printf("After (ts3 + 2.0):\n%v", ts3)
/*
Before:
1 1 1
1 1 1
After (ts3 + 2.0):
3 3 3
3 3 3
*/
}
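Moving data between Go slices and tensors is another everyday task. The sketch below reuses the imports from the example above and assumes two helpers that follow the ts package's naming conventions, ts.MustOfSlice and Tensor.Float64Values; treat them as illustrative rather than authoritative.

func sliceRoundTrip() {
	// Build a 2x3 tensor from a Go slice (assumed constructor: ts.MustOfSlice).
	t := ts.MustOfSlice([]float64{1, 2, 3, 4, 5, 6}).MustView([]int64{2, 3}, true)
	defer t.MustDrop()

	// Flatten the tensor back into a Go []float64 (assumed accessor: Float64Values).
	vals := t.Float64Values()
	fmt.Println(vals) // [1 2 3 4 5 6]
}

The next example switches to the nn package and builds a simplified convolutional neural network.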
import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/nn"
	"github.com/sugarme/gotch/ts"
)

type Net struct {
	conv1 *nn.Conv2D
	conv2 *nn.Conv2D
	fc    *nn.Linear
}

func newNet(vs *nn.Path) *Net {
	conv1 := nn.NewConv2D(vs, 1, 16, 2, nn.DefaultConv2DConfig())
	conv2 := nn.NewConv2D(vs, 16, 10, 2, nn.DefaultConv2DConfig())
	fc := nn.NewLinear(vs, 10, 10, nn.DefaultLinearConfig())

	return &Net{
		conv1,
		conv2,
		fc,
	}
}

func (n Net) ForwardT(xs *ts.Tensor, train bool) *ts.Tensor {
	xs = xs.MustView([]int64{-1, 1, 8, 8}, false)

	outC1 := xs.Apply(n.conv1)
	outMP1 := outC1.MaxPool2DDefault(2, true)
	defer outMP1.MustDrop()

	outC2 := outMP1.Apply(n.conv2)
	outMP2 := outC2.MaxPool2DDefault(2, true)
	outView2 := outMP2.MustView([]int64{-1, 10}, true)
	defer outView2.MustDrop()

	outFC := outView2.Apply(n.fc)

	return outFC.MustRelu(true)
}

func main() {
	vs := nn.NewVarStore(gotch.CPU)
	net := newNet(vs.Root())

	xs := ts.MustOnes([]int64{8, 8}, gotch.Float, gotch.CPU)

	logits := net.ForwardT(xs, false)
	fmt.Printf("Logits: %0.3f", logits)
}
//Logits: 0.000 0.000 0.000 0.225 0.321 0.147 0.000 0.207 0.000 0.000
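The network above only runs a forward pass. A single training step would add a loss and an optimizer. The sketch below reuses the imports from the example above and follows the pattern of the gotch MNIST examples; nn.DefaultAdamConfig, Optimizer.BackwardStep, and Tensor.CrossEntropyForLogits are assumed names, so adapt them to the actual API of your gotch version.

// trainStep runs one hedged forward/backward/update step.
// labels is expected to be an int64 tensor of class indices with shape [batchSize].
func trainStep(vs *nn.VarStore, net *Net, xs, labels *ts.Tensor) error {
	// Assumed optimizer constructor and builder.
	opt, err := nn.DefaultAdamConfig().Build(vs, 1e-3)
	if err != nil {
		return err
	}

	logits := net.ForwardT(xs, true)             // forward pass in training mode
	loss := logits.CrossEntropyForLogits(labels) // assumed cross-entropy helper
	opt.BackwardStep(loss)                       // backward pass + parameter update
	loss.MustDrop()

	return nil
}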
Play around with gotch and explore further. gotch is licensed under the Apache License 2.0.