This library provides unofficial Go clients for the OpenAI API. We support ChatGPT, GPT-3/GPT-4, DALL·E 2, and Whisper.

go get github.com/sashabaranov/go-openai

Currently, go-openai requires Go version 1.18 or greater.
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }

    fmt.Println(resp.Choices[0].Message.Content)
}
NOTE: Your API key is sensitive information. Do not share it with anyone. One way to keep it out of source code is to read it from an environment variable, as sketched below.
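A minimal sketch, assuming the key is stored in the OPENAI_API_KEY environment variable (the variable name is just a convention; a later example uses OPENAI_KEY):

package main

import (
    "fmt"
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    // Read the key from the environment instead of hard-coding it.
    apiKey := os.Getenv("OPENAI_API_KEY")
    if apiKey == "" {
        fmt.Println("OPENAI_API_KEY is not set")
        return
    }
    client := openai.NewClient(apiKey)
    _ = client // use the client as in the examples below
}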
package main

import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.ChatCompletionRequest{
        Model:     openai.GPT3Dot5Turbo,
        MaxTokens: 20,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "Lorem ipsum",
            },
        },
        Stream: true,
    }
    stream, err := c.CreateChatCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("ChatCompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    fmt.Printf("Stream response: ")
    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("\nStream finished")
            return
        }
        if err != nil {
            fmt.Printf("\nStream error: %v\n", err)
            return
        }
        fmt.Print(response.Choices[0].Delta.Content)
    }
}
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Babbage002,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
    }
    resp, err := c.CreateCompletion(ctx, req)
    if err != nil {
        fmt.Printf("Completion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Text)
}
package main

import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Babbage002,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
        Stream:    true,
    }
    stream, err := c.CreateCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("CompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("Stream finished")
            return
        }
        if err != nil {
            fmt.Printf("Stream error: %v\n", err)
            return
        }
        fmt.Printf("Stream response: %v\n", response)
    }
}
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: "recording.mp3",
    }
    resp, err := c.CreateTranscription(ctx, req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    fmt.Println(resp.Text)
}
package main

import (
    "context"
    "fmt"
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient(os.Getenv("OPENAI_KEY"))

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: os.Args[1],
        Format:   openai.AudioResponseFormatSRT,
    }
    resp, err := c.CreateTranscription(context.Background(), req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    f, err := os.Create(os.Args[1] + ".srt")
    if err != nil {
        fmt.Printf("Could not open file: %v\n", err)
        return
    }
    defer f.Close()
    if _, err := f.WriteString(resp.Text); err != nil {
        fmt.Printf("Error writing to file: %v\n", err)
        return
    }
}
package main

import (
    "bytes"
    "context"
    "encoding/base64"
    "fmt"
    "image/png"
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    // Sample image by link
    reqUrl := openai.ImageRequest{
        Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatURL,
        N:              1,
    }
    respUrl, err := c.CreateImage(ctx, reqUrl)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }
    fmt.Println(respUrl.Data[0].URL)

    // Example image as base64
    reqBase64 := openai.ImageRequest{
        Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatB64JSON,
        N:              1,
    }
    respBase64, err := c.CreateImage(ctx, reqBase64)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }
    imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
    if err != nil {
        fmt.Printf("Base64 decode error: %v\n", err)
        return
    }
    r := bytes.NewReader(imgBytes)
    imgData, err := png.Decode(r)
    if err != nil {
        fmt.Printf("PNG decode error: %v\n", err)
        return
    }
    file, err := os.Create("example.png")
    if err != nil {
        fmt.Printf("File creation error: %v\n", err)
        return
    }
    defer file.Close()
    if err := png.Encode(file, imgData); err != nil {
        fmt.Printf("PNG encode error: %v\n", err)
        return
    }
    fmt.Println("The image was saved as example.png")
}
config := openai.DefaultConfig("token")
proxyUrl, err := url.Parse("http://localhost:{port}")
if err != nil {
    panic(err)
}
transport := &http.Transport{
    Proxy: http.ProxyURL(proxyUrl),
}
config.HTTPClient = &http.Client{
    Transport: transport,
}

c := openai.NewClientWithConfig(config)
See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
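Beyond proxies, ClientConfig can also point the client at any OpenAI-compatible endpoint by overriding its base URL. A minimal fragment, in the same style as the proxy snippet above (the URL is a hypothetical placeholder):

config := openai.DefaultConfig("token")
config.BaseURL = "https://api.example.com/v1" // hypothetical OpenAI-compatible endpoint
c := openai.NewClientWithConfig(config)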
package main

import (
    "bufio"
    "context"
    "fmt"
    "os"
    "strings"

    "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    messages := make([]openai.ChatCompletionMessage, 0)
    reader := bufio.NewReader(os.Stdin)
    fmt.Println("Conversation")
    fmt.Println("---------------------")

    for {
        fmt.Print("-> ")
        text, _ := reader.ReadString('\n')
        // convert CRLF to LF
        text = strings.Replace(text, "\n", "", -1)
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleUser,
            Content: text,
        })

        resp, err := client.CreateChatCompletion(
            context.Background(),
            openai.ChatCompletionRequest{
                Model:    openai.GPT3Dot5Turbo,
                Messages: messages,
            },
        )
        if err != nil {
            fmt.Printf("ChatCompletion error: %v\n", err)
            continue
        }

        content := resp.Choices[0].Message.Content
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleAssistant,
            Content: content,
        })
        fmt.Println(content)
    }
}
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello Azure OpenAI!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Message.Content)
}
package main

import (
    "context"
    "log"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your-token")

    // Create an EmbeddingRequest for the user query
    queryReq := openai.EmbeddingRequest{
        Input: []string{"How many chucks would a woodchuck chuck"},
        Model: openai.AdaEmbeddingV2,
    }

    // Create an embedding for the user query
    queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq)
    if err != nil {
        log.Fatal("Error creating query embedding:", err)
    }

    // Create an EmbeddingRequest for the target text
    targetReq := openai.EmbeddingRequest{
        Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"},
        Model: openai.AdaEmbeddingV2,
    }

    // Create an embedding for the target text
    targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq)
    if err != nil {
        log.Fatal("Error creating target embedding:", err)
    }

    // Now that we have the embeddings for the user query and the target text, we
    // can calculate their similarity.
    queryEmbedding := queryResponse.Data[0]
    targetEmbedding := targetResponse.Data[0]
    similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
    if err != nil {
        log.Fatal("Error calculating dot product:", err)
    }
    log.Printf("The similarity score between the query and the target is %f", similarity)
}
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    config.APIVersion = "2023-05-15" // optional update to latest API version

    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    input := "Text to vectorize"

    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateEmbeddings(
        context.Background(),
        openai.EmbeddingRequest{
            Input: []string{input},
            Model: openai.AdaEmbeddingV2,
        })
    if err != nil {
        fmt.Printf("CreateEmbeddings error: %v\n", err)
        return
    }

    vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
    fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}
It is now possible for a chat completion to choose to call a function for more information (see the developer docs here).

To describe the kind of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library; however, we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.

The developer docs give this JSON schema definition as an example:
{
  "name": "get_current_weather",
  "description": "Get the current weather in a given location",
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "The city and state, e.g. San Francisco, CA"
      },
      "unit": {
        "type": "string",
        "enum": ["celsius", "fahrenheit"]
      }
    },
    "required": ["location"]
  }
}
Using the jsonschema package, this schema can be created using structs like so:
FunctionDefinition{
    Name: "get_current_weather",
    Parameters: jsonschema.Definition{
        Type: jsonschema.Object,
        Properties: map[string]jsonschema.Definition{
            "location": {
                Type:        jsonschema.String,
                Description: "The city and state, e.g. San Francisco, CA",
            },
            "unit": {
                Type: jsonschema.String,
                Enum: []string{"celsius", "fahrenheit"},
            },
        },
        Required: []string{"location"},
    },
}
The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON). A sketch of attaching such a definition to a chat completion request follows.
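A minimal sketch, assuming the Tools/ToolTypeFunction API of recent go-openai versions (executing the function and sending its result back to the model is omitted):

package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
    "github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
    client := openai.NewClient("your token")

    f := openai.FunctionDefinition{
        Name:        "get_current_weather",
        Description: "Get the current weather in a given location",
        Parameters: jsonschema.Definition{
            Type: jsonschema.Object,
            Properties: map[string]jsonschema.Definition{
                "location": {
                    Type:        jsonschema.String,
                    Description: "The city and state, e.g. San Francisco, CA",
                },
            },
            Required: []string{"location"},
        },
    }
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {Role: openai.ChatMessageRoleUser, Content: "What is the weather in Boston?"},
            },
            Tools: []openai.Tool{
                {Type: openai.ToolTypeFunction, Function: &f},
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }
    // When the model decides to call the function, the reply carries tool
    // calls (with JSON-encoded arguments) instead of plain text content.
    for _, call := range resp.Choices[0].Message.ToolCalls {
        fmt.Println(call.Function.Name, call.Function.Arguments)
    }
}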
OpenAI maintains clear documentation on how to handle API errors. Example:
e := &openai.APIError{}
if errors.As(err, &e) {
    switch e.HTTPStatusCode {
    case 401:
        // invalid auth or key (do not retry)
    case 429:
        // rate limiting or engine overload (wait and retry)
    case 500:
        // openai server error (retry)
    default:
        // unhandled
    }
}
package main

import (
    "context"
    "fmt"

    "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    ctx := context.Background()

    // create a .jsonl file with your training data for conversational model
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}

    // chat models are trained using the following file format:
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}

    // you can use openai cli tool to validate the data
    // For more info - https://platform.openai.com/docs/guides/fine-tuning
    file, err := client.CreateFile(ctx, openai.FileRequest{
        FilePath: "training_prepared.jsonl",
        Purpose:  "fine-tune",
    })
    if err != nil {
        fmt.Printf("Upload JSONL file error: %v\n", err)
        return
    }

    // create a fine tuning job
    // Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large)
    // use below get method to know the status of your model
    fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
        TrainingFile: file.ID,
        Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
    })
    if err != nil {
        fmt.Printf("Creating new fine tune model error: %v\n", err)
        return
    }

    fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID)
    if err != nil {
        fmt.Printf("Getting fine tune model error: %v\n", err)
        return
    }
    fmt.Println(fineTuningJob.FineTunedModel)

    // once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request
    // resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
    //     Model:  fineTuningJob.FineTunedModel,
    //     Prompt: "your prompt",
    // })
    // if err != nil {
    //     fmt.Printf("Create completion error %v\n", err)
    //     return
    // }
    //
    // fmt.Println(resp.Choices[0].Text)
}
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/sashabaranov/go-openai"
    "github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
    client := openai.NewClient("your token")
    ctx := context.Background()

    type Result struct {
        Steps []struct {
            Explanation string `json:"explanation"`
            Output      string `json:"output"`
        } `json:"steps"`
        FinalAnswer string `json:"final_answer"`
    }
    var result Result
    schema, err := jsonschema.GenerateSchemaForType(result)
    if err != nil {
        log.Fatalf("GenerateSchemaForType error: %v", err)
    }
    resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
        Model: openai.GPT4oMini,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleSystem,
                Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
            },
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "how can I solve 8x + 7 = -23",
            },
        },
        ResponseFormat: &openai.ChatCompletionResponseFormat{
            Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
            JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
                Name:   "math_reasoning",
                Schema: schema,
                Strict: true,
            },
        },
    })
    if err != nil {
        log.Fatalf("CreateChatCompletion error: %v", err)
    }
    err = schema.Unmarshal(resp.Choices[0].Message.Content, &result)
    if err != nil {
        log.Fatalf("Unmarshal schema error: %v", err)
    }
    fmt.Println(result)
}
Even when specifying a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play.

1. Go OpenAI behavior: when you specify a temperature field of 0 in Go OpenAI, the omitempty tag causes that field to be removed from the request. Consequently, the OpenAI API applies the default value of 1.
2. Token count for input/output: if there is a large number of tokens in the input and output, setting the temperature to 0 can still result in non-deterministic behavior.

Due to the factors mentioned above, different answers may be returned even for the same question.

Workarounds:
1. Use the seed parameter in conjunction with the system_fingerprint response field, alongside temperature management.
2. Specify math.SmallestNonzeroFloat32 in the temperature field instead of 0 to mimic the behavior of setting it to 0 (see the sketch below).

By adopting these strategies, you can expect more consistent results.
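A minimal fragment of the second workaround (this assumes a chat request as in the earlier examples and requires importing math):

req := openai.ChatCompletionRequest{
    Model: openai.GPT3Dot5Turbo,
    // A literal 0 would be dropped by omitempty; this tiny value survives
    // serialization and behaves essentially like a temperature of 0.
    Temperature: math.SmallestNonzeroFloat32,
    Messages: []openai.ChatCompletionMessage{
        {Role: openai.ChatMessageRoleUser, Content: "Hello!"},
    },
}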
Related issues:
omitempty option of request struct will generate incorrect request when parameter is 0.
No, go-openai does not provide a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there is a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into go-openai. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.

For counting tokens, third-party tokenizers such as tiktoken-go (https://github.com/pkoukk/tiktoken-go) can be helpful.
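For illustration, a minimal sketch using the third-party tiktoken-go tokenizer (an external dependency, not part of go-openai; the model name is just an example):

package main

import (
    "fmt"

    "github.com/pkoukk/tiktoken-go"
)

func main() {
    // Look up the tokenizer used by the model, then encode the text.
    tkm, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
    if err != nil {
        fmt.Printf("EncodingForModel error: %v\n", err)
        return
    }
    tokens := tkm.Encode("Hello, world!", nil, nil)
    fmt.Println("token count:", len(tokens))
}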
Related issues:
Is it possible to join the implementation of GPT3 Tokenizer.
By following the contributing guidelines, we hope to ensure that your contributions are made smoothly and efficiently.

We would like to take a moment to express our deepest gratitude to the contributors and sponsors of this project.

To all of you: thank you. You've helped us achieve more than we ever imagined possible. We can't wait to see where we go next, together!