// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package anthropic implements the model.LLM interface backed by Claude models
// served via Vertex AI, the Anthropic API, or AWS Bedrock.
package anthropic

import (
	"context"
	"fmt"
	"iter"
	"os"

	"github.com/anthropics/anthropic-sdk-go"
	"github.com/anthropics/anthropic-sdk-go/option"
	"github.com/anthropics/anthropic-sdk-go/packages/ssestream"
	"github.com/anthropics/anthropic-sdk-go/vertex"

	"google.golang.org/adk/model"
)

const (
	envProjectID = "GOOGLE_CLOUD_PROJECT"
	envLocation  = "GOOGLE_CLOUD_LOCATION"

	defaultMaxTokens  = 8192
	defaultOAuthScope = "https://www.googleapis.com/auth/cloud-platform"
)

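// Supported values for Config.Provider.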
const (
	ProviderVertexAI   = "vertex_ai"
	ProviderAnthropic  = "anthropic"
	ProviderAWSBedrock = "aws_bedrock"
)

// Config controls how the Anthropic-backed model is initialized.
type Config struct {
	// Provider indicates which service is used to access the Anthropic models.
	// Supported values are "vertex_ai", "aws_bedrock", and "anthropic". Default is "vertex_ai".
	Provider string
	// APIKey is the API key used to authenticate with the Anthropic API.
	// Only required when Provider is "anthropic".
	APIKey string
	// MaxTokens sets the maximum number of tokens the model can generate.
	// If zero, it defaults to 8192.
	MaxTokens int64
	// ClientOptions are forwarded to the underlying Anthropic SDK client.
	ClientOptions []option.RequestOption
}

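// applyDefaults fills unset fields with the package defaults: an empty client
// option slice, 8192 max tokens, and the Vertex AI provider.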
func (c *Config) applyDefaults() {
	if c.ClientOptions == nil {
		c.ClientOptions = []option.RequestOption{}
	}
	if c.MaxTokens == 0 {
		c.MaxTokens = defaultMaxTokens
	}
	if c.Provider == "" {
		c.Provider = ProviderVertexAI
	}
}

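// AnthropicModel implements [model.LLM] on top of the Anthropic Messages API.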
type AnthropicModel struct {
	client anthropic.Client

	name      string
	maxTokens int64
}

// NewModel returns a [model.LLM] backed by the Anthropic API. A nil cfg is
// treated as an empty Config, so the package defaults apply.
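//
// A minimal usage sketch (the model ID shown is illustrative; substitute one
// that is enabled for your project):
//
//	llm, err := anthropic.NewModel(ctx, "claude-3-5-sonnet-v2@20241022", nil)
//	if err != nil {
//		// handle the error
//	}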
func NewModel(ctx context.Context, modelName string, cfg *Config) (model.LLM, error) {
	if modelName == "" {
		return nil, fmt.Errorf("model name must be provided")
	}

	if cfg == nil {
		cfg = &Config{}
	}
	cfg.applyDefaults()

	opts := append([]option.RequestOption{}, cfg.ClientOptions...)

	switch cfg.Provider {
	case ProviderAnthropic:
		if cfg.APIKey == "" {
			return nil, fmt.Errorf("API key must be provided to use the Anthropic provider")
		}
		opts = append(opts, option.WithAPIKey(cfg.APIKey))
	case ProviderAWSBedrock:
		// Nothing Bedrock-specific is configured here for now; callers must supply
		// credentials through client options such as bedrock.WithConfig() or
		// bedrock.WithLoadDefaultConfig().
	default: // ProviderVertexAI and any unrecognized value.
		projectID := os.Getenv(envProjectID)
		location := os.Getenv(envLocation)
		if projectID == "" || location == "" {
			return nil, fmt.Errorf("GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION must be set to use Anthropic on Vertex")
		}
		opts = append(opts, vertex.WithGoogleAuth(ctx, location, projectID, defaultOAuthScope))
	}

	return &AnthropicModel{
		name:      modelName,
		maxTokens: cfg.MaxTokens,
		client:    anthropic.NewClient(opts...),
	}, nil
}

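// Name returns the model name this instance was configured with.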
func (m *AnthropicModel) Name() string {
	return m.name
}

// GenerateContent issues a Messages.New call. When stream is true, the Anthropic
// streaming API is used to emit partial responses as they arrive.
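//
// The result is an iterator sequence; a typical (illustrative) consumption loop:
//
//	for resp, err := range llm.GenerateContent(ctx, req, true) {
//		if err != nil {
//			// handle the error
//		}
//		// use resp
//	}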
func (m *AnthropicModel) GenerateContent(ctx context.Context, req *model.LLMRequest, stream bool) iter.Seq2[*model.LLMResponse, error] {
	if stream {
		return m.generateStream(ctx, req)
	}
	return func(yield func(*model.LLMResponse, error) bool) {
		resp, err := m.generate(ctx, req)
		if !yield(resp, err) {
			return
		}
	}
}

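// generate sends a single blocking Messages.New request and converts the reply
// into a [model.LLMResponse].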
func (m *AnthropicModel) generate(ctx context.Context, req *model.LLMRequest) (*model.LLMResponse, error) {
	if req == nil {
		return nil, fmt.Errorf("llm request must not be nil")
	}

	requestBuilder := RequestBuilder{modelName: m.name, maxTokens: m.maxTokens}
	params, err := requestBuilder.FromLLMRequest(req)
	if err != nil {
		return nil, err
	}

	msg, err := m.client.Messages.New(ctx, *params)
	if err != nil {
		return nil, fmt.Errorf("failed to send llm request to anthropic: %w", err)
	}

	responseBuilder := ResponseBuilder{}
	return responseBuilder.FromMessage(msg)
}

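// generateStream opens a streaming Messages request and yields partial
// responses as deltas arrive, followed by a final turn-complete response.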
func (m *AnthropicModel) generateStream(ctx context.Context, req *model.LLMRequest) iter.Seq2[*model.LLMResponse, error] {
	return func(yield func(*model.LLMResponse, error) bool) {
		builder := RequestBuilder{modelName: m.name, maxTokens: m.maxTokens}
		params, err := builder.FromLLMRequest(req)
		if err != nil {
			yield(nil, err)
			return
		}

		stream := m.client.Messages.NewStreaming(ctx, *params)
		for resp, err := range readStreamEvents(stream) {
			if !yield(resp, err) {
				return
			}
		}
	}
}

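// readStreamEvents drains the SSE stream: each content delta is surfaced as a
// partial response, and on the message-stop event the accumulated message is
// emitted as the final response with TurnComplete set.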
func readStreamEvents(stream *ssestream.Stream[anthropic.MessageStreamEventUnion]) iter.Seq2[*model.LLMResponse, error] {
	return func(yield func(*model.LLMResponse, error) bool) {
		if stream == nil {
			yield(nil, fmt.Errorf("stream must not be nil"))
			return
		}
		defer func() {
			_ = stream.Close()
		}()

		if err := stream.Err(); err != nil {
			yield(nil, fmt.Errorf("stream error: %w", err))
			return
		}

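		// message accumulates the streamed deltas into the complete response.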
		var message anthropic.Message
		for stream.Next() {
			event := stream.Current()
			if err := message.Accumulate(event); err != nil {
				yield(nil, fmt.Errorf("failed to accumulate stream event: %w", err))
				return
			}

			partialResponse := parsePartialStreamEvent(event)
			if partialResponse != nil {
				if !yield(partialResponse, nil) {
					return
				}
			}

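			// On message stop, emit the fully accumulated message as the final,
			// turn-complete response.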
			if _, ok := event.AsAny().(anthropic.MessageStopEvent); ok {
				responseBuilder := ResponseBuilder{}
				finalResponse, err := responseBuilder.FromMessage(&message)
				if err != nil {
					yield(nil, err)
					return
				}
				finalResponse.TurnComplete = true
				if !yield(finalResponse, nil) {
					return
				}
			}
		}

		if err := stream.Err(); err != nil {
			yield(nil, fmt.Errorf("stream error: %w", err))
		}
	}
}