Blades AI Agent 框架的 Google Gemini 模型提供程序,支持 GenAI 和 Vertex AI 部署。
go get github.com/go-kratos/blades/contrib/gemini

NewClient 函数

import (
    "context"

    "github.com/go-kratos/blades"
    "github.com/go-kratos/blades/contrib/gemini"
    "google.golang.org/genai"
)
// Create client with GenAI configuration
clientConfig := &genai.ClientConfig{
    APIKey:  "your-api-key",
    Backend: genai.BackendGoogleAI,
}
client, err := gemini.NewClient(context.Background(), clientConfig)
if err != nil {
    panic(err)
}
// Generate response
req := &blades.ModelRequest{
    Model: "gemini-2.0-flash-exp",
    Messages: []*blades.Message{
        {
            Role: blades.RoleUser,
            Parts: []blades.Part{
                blades.TextPart{Text: "What is the capital of France?"},
            },
        },
    },
}
resp, err := client.Generate(context.Background(), req)
if err != nil {
    panic(err)
}
// Access response
for _, msg := range resp.Messages {
    for _, part := range msg.Parts {
        if textPart, ok := part.(blades.TextPart); ok {
            fmt.Println(textPart.Text)
        }
    }
}

import (
    "cloud.google.com/go/auth"
    "google.golang.org/genai"
)
// Create client with Vertex AI
creds, err := auth.Detect(context.Background(), &auth.DetectOptions{
    // Configure auth options
})
if err != nil {
    panic(err)
}
clientConfig := &genai.ClientConfig{
    Backend:     genai.BackendVertexAI,
    Project:     "my-project-id",
    Location:    "us-central1",
    Credentials: creds.TokenProvider(),
}
client, err := gemini.NewClient(context.Background(), clientConfig)
// Use same request/response pattern as above

使用思考模型启用 Gemini 的推理能力:
import "github.com/go-kratos/blades"
// Configure thinking budget
thinkingBudget := int32(10000) // 10K tokens for reasoning
includeThoughts := true
resp, err := client.Generate(context.Background(), req,
    blades.WithThinkingBudget(&thinkingBudget),
    blades.WithIncludeThoughts(&includeThoughts),
)
// The response will include thinking blocks showing the model's reasoning process
for _, msg := range resp.Messages {
    for _, part := range msg.Parts {
        if textPart, ok := part.(blades.TextPart); ok {
            fmt.Println(textPart.Text)
        }
    }
}

resp, err := client.Generate(context.Background(), req,
    blades.WithMaxOutputTokens(4096),
    blades.WithTemperature(0.7),
    blades.WithTopP(0.9),
    blades.WithMaxIterations(5), // Tool calling iterations
)

定义工具并让 Gemini 自动执行它们:
// Define a tool
weatherTool := &blades.Tool{
    Name:        "get_weather",
    Description: "Get current weather for a location",
    Handle: func(ctx context.Context, arguments string) (string, error) {
        // Parse arguments and fetch weather
        return `{"temperature": 72, "condition": "sunny"}`, nil
    },
}
// Add tools to request
req := &blades.ModelRequest{
    Model: "gemini-2.0-flash-exp",
    Tools: []*blades.Tool{weatherTool},
    Messages: []*blades.Message{
        {
            Role: blades.RoleUser,
            Parts: []blades.Part{
                blades.TextPart{Text: "What's the weather in San Francisco?"},
            },
        },
    },
}
// Generate with automatic tool execution
resp, err := client.Generate(context.Background(), req)
// Gemini will automatically:
// 1. Call the tool if needed
// 2. Process the tool result
// 3. Return the final response

实时流式传输响应:
stream, err := client.NewStream(context.Background(), req)
if err != nil {
    panic(err)
}
for stream.Next() {
    resp, err := stream.Current()
    if err != nil {
        panic(err)
    }
    for _, msg := range resp.Messages {
        for _, part := range msg.Parts {
            if textPart, ok := part.(blades.TextPart); ok {
                fmt.Print(textPart.Text) // Print as it streams
            }
        }
    }
}
if err := stream.Err(); err != nil {
    panic(err)
}

该提供程序返回针对常见问题的特定错误:
import "github.com/go-kratos/blades/contrib/gemini"
resp, err := client.Generate(ctx, req)
if err != nil {
    switch err {
    case gemini.ErrEmptyResponse:
        // Handle empty response
    case gemini.ErrToolNotFound:
        // Handle missing tool
    case gemini.ErrTooManyIterations:
        // Handle iteration limit
    default:
        // Handle other errors
    }
}

可用模型:
- gemini-2.0-flash-exp - 最新的实验模型
- gemini-2.0-flash-thinking-exp-01-21 - 具有扩展推理的思考模型
- gemini-1.5-pro - 上一代专业模型
- gemini-1.5-flash - 上一代快速模型