Google Gemini model provider for the Blades AI Agent framework, supporting GenAI and Vertex AI deployments.
Install the package:

```bash
go get github.com/go-kratos/blades/contrib/gemini
```

Use the `NewClient` function with the GenAI SDK:

```go
import (
	"context"

	"github.com/go-kratos/blades"
	"github.com/go-kratos/blades/contrib/gemini"
	"google.golang.org/genai"
)
```
```go
// Create client with GenAI configuration
clientConfig := &genai.ClientConfig{
	APIKey:  "your-api-key",
	Backend: genai.BackendGoogleAI,
}
```
```go
client, err := gemini.NewClient(context.Background(), clientConfig)
if err != nil {
	panic(err)
}
```
```go
// Generate response
req := &blades.ModelRequest{
	Model: "gemini-2.0-flash-exp",
	Messages: []*blades.Message{
		{
			Role: blades.RoleUser,
			Parts: []blades.Part{
				blades.TextPart{Text: "What is the capital of France?"},
			},
		},
	},
}
```
```go
resp, err := client.Generate(context.Background(), req)
if err != nil {
	panic(err)
}
```
```go
// Access response
for _, msg := range resp.Messages {
	for _, part := range msg.Parts {
		if textPart, ok := part.(blades.TextPart); ok {
			fmt.Println(textPart.Text)
		}
	}
}
```

For Vertex AI deployments:

```go
import (
	"cloud.google.com/go/auth"

	"google.golang.org/genai"
)
```
```go
// Create client with Vertex AI
creds, err := auth.Detect(context.Background(), &auth.DetectOptions{
	// Configure auth options
})
if err != nil {
	panic(err)
}
```
```go
clientConfig := &genai.ClientConfig{
	Backend:     genai.BackendVertexAI,
	Project:     "my-project-id",
	Location:    "us-central1",
	Credentials: creds.TokenProvider(),
}
```
```go
client, err := gemini.NewClient(context.Background(), clientConfig)
// Use same request/response pattern as above
```

Enable Gemini's reasoning capabilities using thinking models:
```go
import "github.com/go-kratos/blades"
```
```go
// Configure thinking budget
thinkingBudget := int32(10000) // 10K tokens for reasoning
includeThoughts := true
```
```go
resp, err := client.Generate(context.Background(), req,
	blades.WithThinkingBudget(&thinkingBudget),
	blades.WithIncludeThoughts(&includeThoughts),
)
```
```go
// The response will include thinking blocks showing the model's reasoning process
for _, msg := range resp.Messages {
	for _, part := range msg.Parts {
		if textPart, ok := part.(blades.TextPart); ok {
			fmt.Println(textPart.Text)
		}
	}
}
```

Configure generation options:

```go
resp, err := client.Generate(context.Background(), req,
	blades.WithMaxOutputTokens(4096),
	blades.WithTemperature(0.7),
	blades.WithTopP(0.9),
	blades.WithMaxIterations(5), // Tool calling iterations
)
```

Define tools and let Gemini automatically execute them:
```go
// Define a tool
weatherTool := &blades.Tool{
	Name:        "get_weather",
	Description: "Get current weather for a location",
	Handle: func(ctx context.Context, arguments string) (string, error) {
		// Parse arguments and fetch weather
		return `{"temperature": 72, "condition": "sunny"}`, nil
	},
}
```
```go
// Add tools to request
req := &blades.ModelRequest{
	Model: "gemini-2.0-flash-exp",
	Tools: []*blades.Tool{weatherTool},
	Messages: []*blades.Message{
		{
			Role: blades.RoleUser,
			Parts: []blades.Part{
				blades.TextPart{Text: "What's the weather in San Francisco?"},
			},
		},
	},
}
```
```go
// Generate with automatic tool execution
resp, err := client.Generate(context.Background(), req)
```
```go
// Gemini will automatically:
// 1. Call the tool if needed
// 2. Process the tool result
// 3. Return the final response
```

Real-time response streaming:
```go
stream, err := client.NewStream(context.Background(), req)
if err != nil {
	panic(err)
}
```
```go
for stream.Next() {
	resp, err := stream.Current()
	if err != nil {
		panic(err)
	}

	for _, msg := range resp.Messages {
		for _, part := range msg.Parts {
			if textPart, ok := part.(blades.TextPart); ok {
				fmt.Print(textPart.Text) // Print as it streams
			}
		}
	}
}
```
```go
if err := stream.Err(); err != nil {
	panic(err)
}
```

This provider returns specific errors for common issues:
```go
import "github.com/go-kratos/blades/contrib/gemini"
```
```go
resp, err := client.Generate(ctx, req)
if err != nil {
	switch err {
	case gemini.ErrEmptyResponse:
		// Handle empty response
	case gemini.ErrToolNotFound:
		// Handle missing tool
	case gemini.ErrTooManyIterations:
		// Handle iteration limit
	default:
		// Handle other errors
	}
}
```

Available models:
- `gemini-2.0-flash-exp` - Latest experimental model
- `gemini-2.0-flash-thinking-exp-01-21` - Thinking model with extended reasoning
- `gemini-1.5-pro` - Previous generation professional model
- `gemini-1.5-flash` - Previous generation fast model