Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 23 additions & 4 deletions rig/rig-core/src/providers/anthropic/completion.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@ use tracing::{Instrument, Level, enabled, info_span};

/// `claude-opus-4-6` completion model
pub const CLAUDE_OPUS_4_6: &str = "claude-opus-4-6";
/// `claude-opus-4-7` completion model
pub const CLAUDE_OPUS_4_7: &str = "claude-opus-4-7";
/// `claude-sonnet-4-6` completion model
pub const CLAUDE_SONNET_4_6: &str = "claude-sonnet-4-6";
/// `claude-haiku-4-5` completion model
Expand Down Expand Up @@ -945,7 +947,7 @@ where
///
/// | Model | Minimum tokens |
/// |-------|---------------|
/// | `claude-opus-4-6`, `claude-opus-4-5` | 4 096 |
/// | `claude-opus-4-7`, `claude-opus-4-6`, `claude-opus-4-5` | 4 096 |
/// | `claude-sonnet-4-6` | 2 048 |
/// | `claude-sonnet-4-5`, `claude-opus-4-1`, `claude-opus-4`, `claude-sonnet-4` | 1 024 |
/// | `claude-haiku-4-5` | 4 096 |
Expand Down Expand Up @@ -980,10 +982,10 @@ where
}

/// Anthropic requires a `max_tokens` parameter to be set, which is dependent on the model. If not
/// set or if set too high, the request will fail. The following values are based on the models
/// available at the time of writing.
/// set or if set too high, the request will fail. The following values are based on Anthropic's
/// published synchronous Messages API output limits for current models.
fn default_max_tokens_for_model(model: &str) -> Option<u64> {
if model.starts_with("claude-opus-4-6") {
if model.starts_with("claude-opus-4-7") || model.starts_with("claude-opus-4-6") {
Some(128_000)
} else if model.starts_with("claude-opus-4")
|| model.starts_with("claude-sonnet-4")
Expand Down Expand Up @@ -1511,6 +1513,23 @@ mod tests {
use serde_json::json;
use serde_path_to_error::deserialize;

#[test]
fn current_model_default_max_tokens_match_anthropic_limits() {
    // Table of (model id, expected default `max_tokens`) pairs, mirroring the
    // per-model limits encoded in `default_max_tokens_for_model`.
    let expected_limits = [
        (CLAUDE_OPUS_4_7, 128_000_u64),
        (CLAUDE_OPUS_4_6, 128_000),
        (CLAUDE_SONNET_4_6, 64_000),
        (CLAUDE_HAIKU_4_5, 64_000),
    ];

    for (model, limit) in expected_limits {
        assert_eq!(
            default_max_tokens_for_model(model),
            Some(limit),
            "unexpected default max_tokens for model `{model}`"
        );
    }
}

#[test]
fn unknown_model_uses_conservative_default_max_tokens_fallback() {
    // A model id with no table entry yields no per-model default, and the
    // fallback helper substitutes the conservative 2 048-token floor.
    let unrecognized = "claude-unknown";

    assert_eq!(default_max_tokens_for_model(unrecognized), None);
    assert_eq!(default_max_tokens_with_fallback(unrecognized), 2_048);
}

#[test]
fn test_deserialize_message() {
let assistant_message_json = r#"
Expand Down
3 changes: 3 additions & 0 deletions rig/rig-core/src/providers/openai/completion/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,9 @@ where
content.serialize(serializer)
}

/// `gpt-5.5` completion model
pub const GPT_5_5: &str = "gpt-5.5";

/// `gpt-5.2` completion model
pub const GPT_5_2: &str = "gpt-5.2";

Expand Down
6 changes: 5 additions & 1 deletion rig/rig-core/src/providers/openai/image_generation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ pub const DALL_E_2: &str = "dall-e-2";
pub const DALL_E_3: &str = "dall-e-3";
pub const GPT_IMAGE_1: &str = "gpt-image-1";
pub const GPT_IMAGE_1_5: &str = "gpt-image-1.5";
pub const GPT_IMAGE_2: &str = "gpt-image-2";

#[derive(Debug, Deserialize)]
pub struct ImageGenerationData {
Expand Down Expand Up @@ -90,7 +91,10 @@ where
"size": format!("{}x{}", generation_request.width, generation_request.height),
});

if self.model.as_str() != GPT_IMAGE_1 && self.model.as_str() != GPT_IMAGE_1_5 {
if !matches!(
self.model.as_str(),
GPT_IMAGE_1 | GPT_IMAGE_1_5 | GPT_IMAGE_2
) {
merge_inplace(
&mut request,
json!({
Expand Down
1 change: 1 addition & 0 deletions rig/rig-core/tests/anthropic/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ mod empty_end_turn;
mod image;
mod models;
mod multi_turn_streaming;
mod opus_4_7;
mod plaintext_document;
mod reasoning_roundtrip;
mod reasoning_tool_roundtrip;
Expand Down
232 changes: 232 additions & 0 deletions rig/rig-core/tests/anthropic/opus_4_7.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,232 @@
//! Dedicated Claude Opus 4.7 live smoke tests.

use base64::{Engine, prelude::BASE64_STANDARD};
use rig::client::{CompletionClient, ProviderClient};
use rig::completion::message::Image;
use rig::completion::{Chat, Message, Prompt};
use rig::message::{DocumentSourceKind, ImageMediaType};
use rig::providers::anthropic::{self, completion::CLAUDE_OPUS_4_7};
use rig::streaming::{StreamingChat, StreamingPrompt};

use crate::reasoning::{self, ReasoningRoundtripAgent, WeatherTool};
use crate::support::{
Adder, BASIC_PREAMBLE, BASIC_PROMPT, EXTRACTOR_TEXT, IMAGE_FIXTURE_PATH, STREAMING_PREAMBLE,
STREAMING_PROMPT, STREAMING_TOOLS_PREAMBLE, STREAMING_TOOLS_PROMPT, STRUCTURED_OUTPUT_PROMPT,
SmokePerson, SmokeStructuredOutput, Subtract, TOOLS_PREAMBLE, TOOLS_PROMPT,
assert_contains_any_case_insensitive, assert_mentions_expected_number,
assert_nonempty_response, assert_smoke_structured_output, collect_stream_final_response,
};

/// Extra request parameters selecting the `adaptive` thinking mode used by
/// the Opus 4.7 reasoning smoke tests below.
fn opus_4_7_thinking_params() -> serde_json::Value {
    serde_json::json!({ "thinking": { "type": "adaptive" } })
}

/// Plain (non-streaming) prompt round-trip against Claude Opus 4.7.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_prompt_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(BASIC_PREAMBLE)
        .build();

    let answer = agent
        .prompt(BASIC_PROMPT)
        .await
        .expect("prompt should succeed");

    assert_nonempty_response(&answer);
}

/// Streaming prompt round-trip: drain the stream into a final response and
/// check that it is non-empty.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_streaming_prompt_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(STREAMING_PREAMBLE)
        .build();

    let mut token_stream = agent.stream_prompt(STREAMING_PROMPT).await;
    let answer = collect_stream_final_response(&mut token_stream)
        .await
        .expect("streaming prompt should succeed");

    assert_nonempty_response(&answer);
}

/// Tool-calling round-trip: the agent is given arithmetic tools and the
/// final answer is expected to mention -3.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_tools_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(TOOLS_PREAMBLE)
        .tool(Adder)
        .tool(Subtract)
        .build();

    let answer = agent
        .prompt(TOOLS_PROMPT)
        .await
        .expect("tool prompt should succeed");

    assert_mentions_expected_number(&answer, -3);
}

/// Streaming variant of the tool-calling round-trip; same -3 expectation as
/// the non-streaming test.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_streaming_tools_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(STREAMING_TOOLS_PREAMBLE)
        .tool(Adder)
        .tool(Subtract)
        .build();

    let mut token_stream = agent.stream_prompt(STREAMING_TOOLS_PROMPT).await;
    let answer = collect_stream_final_response(&mut token_stream)
        .await
        .expect("streaming tool prompt should succeed");

    assert_mentions_expected_number(&answer, -3);
}

/// Structured-output round-trip: the raw response must deserialize into the
/// declared schema type and satisfy the shared smoke assertions.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_structured_output_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .output_schema::<SmokeStructuredOutput>()
        .build();

    let raw = agent
        .prompt(STRUCTURED_OUTPUT_PROMPT)
        .await
        .expect("structured output prompt should succeed");

    let parsed: SmokeStructuredOutput =
        serde_json::from_str(&raw).expect("structured output should deserialize");

    assert_smoke_structured_output(&parsed);
}

/// Extractor round-trip: both name fields must come back non-empty and the
/// usage counters must be populated.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_extractor_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let extractor = client.extractor::<SmokePerson>(CLAUDE_OPUS_4_7).build();

    let extraction = extractor
        .extract_with_usage(EXTRACTOR_TEXT)
        .await
        .expect("extractor request should succeed");

    let first = extraction
        .data
        .first_name
        .as_deref()
        .expect("first name should be present");
    let last = extraction
        .data
        .last_name
        .as_deref()
        .expect("last name should be present");

    assert_nonempty_response(first);
    assert_nonempty_response(last);
    assert!(
        extraction.usage.total_tokens > 0,
        "usage should be populated"
    );
}

/// Image-input round-trip: send the base64-encoded JPEG fixture and expect a
/// description mentioning the pictured subject.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_image_input_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble("You are an image describer.")
        .build();

    // Read the fixture and base64-encode it before attaching it to the prompt.
    let raw_bytes = std::fs::read(IMAGE_FIXTURE_PATH).expect("fixture image should be readable");
    let encoded = BASE64_STANDARD.encode(raw_bytes);
    let image = Image {
        data: DocumentSourceKind::base64(&encoded),
        media_type: Some(ImageMediaType::JPEG),
        ..Default::default()
    };

    let answer = agent
        .prompt(image)
        .await
        .expect("image prompt should succeed");

    assert_nonempty_response(&answer);
    assert_contains_any_case_insensitive(&answer, &["ant", "insect"]);
}

/// Non-streaming reasoning round-trip with adaptive thinking enabled.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_adaptive_thinking_nonstreaming_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let harness = ReasoningRoundtripAgent::new(
        client.completion_model(CLAUDE_OPUS_4_7),
        Some(opus_4_7_thinking_params()),
    );

    reasoning::run_reasoning_roundtrip_nonstreaming(harness).await;
}

/// Streaming reasoning round-trip with adaptive thinking enabled.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_adaptive_thinking_streaming_smoke() {
    let client = anthropic::Client::from_env().expect("client should build");
    let harness = ReasoningRoundtripAgent::new(
        client.completion_model(CLAUDE_OPUS_4_7),
        Some(opus_4_7_thinking_params()),
    );

    reasoning::run_reasoning_roundtrip_streaming(harness).await;
}

/// Adaptive-thinking tool round-trip (non-streaming): tracks tool invocations
/// via a shared counter and runs the shared universal assertions.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_adaptive_thinking_tool_roundtrip_smoke() {
    use std::sync::{Arc, atomic::AtomicUsize};

    let call_count = Arc::new(AtomicUsize::new(0));
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(reasoning::TOOL_SYSTEM_PROMPT)
        .max_tokens(16384)
        .tool(WeatherTool::new(Arc::clone(&call_count)))
        .additional_params(opus_4_7_thinking_params())
        .build();

    let result = agent
        .chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new())
        .await
        .expect("adaptive thinking tool chat should succeed");

    reasoning::assert_nonstreaming_universal(&result, &call_count, "anthropic");
}

/// Adaptive-thinking tool round-trip (streaming, multi-turn): collects stream
/// statistics and runs the shared universal assertions.
#[tokio::test]
#[ignore = "requires ANTHROPIC_API_KEY"]
async fn messages_adaptive_thinking_streaming_tool_roundtrip_smoke() {
    use std::sync::{Arc, atomic::AtomicUsize};

    let call_count = Arc::new(AtomicUsize::new(0));
    let client = anthropic::Client::from_env().expect("client should build");
    let agent = client
        .agent(CLAUDE_OPUS_4_7)
        .preamble(reasoning::TOOL_SYSTEM_PROMPT)
        .max_tokens(16384)
        .tool(WeatherTool::new(Arc::clone(&call_count)))
        .additional_params(opus_4_7_thinking_params())
        .build();

    let stream = agent
        .stream_chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new())
        .multi_turn(3)
        .await;

    let stats = reasoning::collect_stream_stats(stream, "anthropic").await;
    reasoning::assert_universal(&stats, &call_count, "anthropic");
}
Loading
Loading