diff --git a/rig/rig-core/src/providers/anthropic/completion.rs b/rig/rig-core/src/providers/anthropic/completion.rs index aa34dea4e..bd738c246 100644 --- a/rig/rig-core/src/providers/anthropic/completion.rs +++ b/rig/rig-core/src/providers/anthropic/completion.rs @@ -23,6 +23,8 @@ use tracing::{Instrument, Level, enabled, info_span}; /// `claude-opus-4-6` completion model pub const CLAUDE_OPUS_4_6: &str = "claude-opus-4-6"; +/// `claude-opus-4-7` completion model +pub const CLAUDE_OPUS_4_7: &str = "claude-opus-4-7"; /// `claude-sonnet-4-6` completion model pub const CLAUDE_SONNET_4_6: &str = "claude-sonnet-4-6"; /// `claude-haiku-4-5` completion model @@ -945,7 +947,7 @@ where /// /// | Model | Minimum tokens | /// |-------|---------------| - /// | `claude-opus-4-6`, `claude-opus-4-5` | 4 096 | + /// | `claude-opus-4-7`, `claude-opus-4-6`, `claude-opus-4-5` | 4 096 | /// | `claude-sonnet-4-6` | 2 048 | /// | `claude-sonnet-4-5`, `claude-opus-4-1`, `claude-opus-4`, `claude-sonnet-4` | 1 024 | /// | `claude-haiku-4-5` | 4 096 | @@ -980,10 +982,10 @@ where } /// Anthropic requires a `max_tokens` parameter to be set, which is dependent on the model. If not -/// set or if set too high, the request will fail. The following values are based on the models -/// available at the time of writing. +/// set or if set too high, the request will fail. The following values are based on Anthropic's +/// published synchronous Messages API output limits for current models. 
fn default_max_tokens_for_model(model: &str) -> Option<u64> { - if model.starts_with("claude-opus-4-6") { + if model.starts_with("claude-opus-4-7") || model.starts_with("claude-opus-4-6") { Some(128_000) } else if model.starts_with("claude-opus-4") || model.starts_with("claude-sonnet-4") @@ -1511,6 +1513,23 @@ mod tests { use serde_json::json; use serde_path_to_error::deserialize; + #[test] + fn current_model_default_max_tokens_match_anthropic_limits() { + assert_eq!(default_max_tokens_for_model(CLAUDE_OPUS_4_7), Some(128_000)); + assert_eq!(default_max_tokens_for_model(CLAUDE_OPUS_4_6), Some(128_000)); + assert_eq!( + default_max_tokens_for_model(CLAUDE_SONNET_4_6), + Some(64_000) + ); + assert_eq!(default_max_tokens_for_model(CLAUDE_HAIKU_4_5), Some(64_000)); + } + + #[test] + fn unknown_model_uses_conservative_default_max_tokens_fallback() { + assert_eq!(default_max_tokens_for_model("claude-unknown"), None); + assert_eq!(default_max_tokens_with_fallback("claude-unknown"), 2_048); + } + #[test] fn test_deserialize_message() { let assistant_message_json = r#" diff --git a/rig/rig-core/src/providers/openai/completion/mod.rs b/rig/rig-core/src/providers/openai/completion/mod.rs index f8fb0b600..046e09b97 100644 --- a/rig/rig-core/src/providers/openai/completion/mod.rs +++ b/rig/rig-core/src/providers/openai/completion/mod.rs @@ -41,6 +41,9 @@ where content.serialize(serializer) } +/// `gpt-5.5` completion model +pub const GPT_5_5: &str = "gpt-5.5"; + /// `gpt-5.2` completion model pub const GPT_5_2: &str = "gpt-5.2"; diff --git a/rig/rig-core/src/providers/openai/image_generation.rs b/rig/rig-core/src/providers/openai/image_generation.rs index ff2a52c36..745476c42 100644 --- a/rig/rig-core/src/providers/openai/image_generation.rs +++ b/rig/rig-core/src/providers/openai/image_generation.rs @@ -15,6 +15,7 @@ pub const DALL_E_2: &str = "dall-e-2"; pub const DALL_E_3: &str = "dall-e-3"; pub const GPT_IMAGE_1: &str = "gpt-image-1"; pub const GPT_IMAGE_1_5: &str = 
"gpt-image-1.5"; +pub const GPT_IMAGE_2: &str = "gpt-image-2"; #[derive(Debug, Deserialize)] pub struct ImageGenerationData { @@ -90,7 +91,10 @@ where "size": format!("{}x{}", generation_request.width, generation_request.height), }); - if self.model.as_str() != GPT_IMAGE_1 && self.model.as_str() != GPT_IMAGE_1_5 { + if !matches!( + self.model.as_str(), + GPT_IMAGE_1 | GPT_IMAGE_1_5 | GPT_IMAGE_2 + ) { merge_inplace( &mut request, json!({ diff --git a/rig/rig-core/tests/anthropic/mod.rs b/rig/rig-core/tests/anthropic/mod.rs index b3bb688a1..aa6eebb9e 100644 --- a/rig/rig-core/tests/anthropic/mod.rs +++ b/rig/rig-core/tests/anthropic/mod.rs @@ -4,6 +4,7 @@ mod empty_end_turn; mod image; mod models; mod multi_turn_streaming; +mod opus_4_7; mod plaintext_document; mod reasoning_roundtrip; mod reasoning_tool_roundtrip; diff --git a/rig/rig-core/tests/anthropic/opus_4_7.rs b/rig/rig-core/tests/anthropic/opus_4_7.rs new file mode 100644 index 000000000..ff23d3139 --- /dev/null +++ b/rig/rig-core/tests/anthropic/opus_4_7.rs @@ -0,0 +1,232 @@ +//! Dedicated Claude Opus 4.7 live smoke tests. 
+ +use base64::{Engine, prelude::BASE64_STANDARD}; +use rig::client::{CompletionClient, ProviderClient}; +use rig::completion::message::Image; +use rig::completion::{Chat, Message, Prompt}; +use rig::message::{DocumentSourceKind, ImageMediaType}; +use rig::providers::anthropic::{self, completion::CLAUDE_OPUS_4_7}; +use rig::streaming::{StreamingChat, StreamingPrompt}; + +use crate::reasoning::{self, ReasoningRoundtripAgent, WeatherTool}; +use crate::support::{ + Adder, BASIC_PREAMBLE, BASIC_PROMPT, EXTRACTOR_TEXT, IMAGE_FIXTURE_PATH, STREAMING_PREAMBLE, + STREAMING_PROMPT, STREAMING_TOOLS_PREAMBLE, STREAMING_TOOLS_PROMPT, STRUCTURED_OUTPUT_PROMPT, + SmokePerson, SmokeStructuredOutput, Subtract, TOOLS_PREAMBLE, TOOLS_PROMPT, + assert_contains_any_case_insensitive, assert_mentions_expected_number, + assert_nonempty_response, assert_smoke_structured_output, collect_stream_final_response, +}; + +fn opus_4_7_thinking_params() -> serde_json::Value { + serde_json::json!({ + "thinking": { "type": "adaptive" } + }) +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_prompt_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble(BASIC_PREAMBLE) + .build(); + + let response = agent + .prompt(BASIC_PROMPT) + .await + .expect("prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_streaming_prompt_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble(STREAMING_PREAMBLE) + .build(); + + let mut stream = agent.stream_prompt(STREAMING_PROMPT).await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("streaming prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn 
messages_tools_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble(TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let response = agent + .prompt(TOOLS_PROMPT) + .await + .expect("tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_streaming_tools_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble(STREAMING_TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let mut stream = agent.stream_prompt(STREAMING_TOOLS_PROMPT).await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("streaming tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_structured_output_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .output_schema::<SmokeStructuredOutput>() + .build(); + + let response = agent + .prompt(STRUCTURED_OUTPUT_PROMPT) + .await + .expect("structured output prompt should succeed"); + let structured: SmokeStructuredOutput = + serde_json::from_str(&response).expect("structured output should deserialize"); + + assert_smoke_structured_output(&structured); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_extractor_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let extractor = client.extractor::<SmokePerson>(CLAUDE_OPUS_4_7).build(); + + let response = extractor + .extract_with_usage(EXTRACTOR_TEXT) + .await + .expect("extractor request should succeed"); + + assert_nonempty_response( + response + .data + .first_name + .as_deref() + .expect("first name should be present"), + ); + 
assert_nonempty_response( + response + .data + .last_name + .as_deref() + .expect("last name should be present"), + ); + assert!(response.usage.total_tokens > 0, "usage should be populated"); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_image_input_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble("You are an image describer.") + .build(); + let image_bytes = std::fs::read(IMAGE_FIXTURE_PATH).expect("fixture image should be readable"); + let image = Image { + data: DocumentSourceKind::base64(&BASE64_STANDARD.encode(image_bytes)), + media_type: Some(ImageMediaType::JPEG), + ..Default::default() + }; + + let response = agent + .prompt(image) + .await + .expect("image prompt should succeed"); + + assert_nonempty_response(&response); + assert_contains_any_case_insensitive(&response, &["ant", "insect"]); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_adaptive_thinking_nonstreaming_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + reasoning::run_reasoning_roundtrip_nonstreaming(ReasoningRoundtripAgent::new( + client.completion_model(CLAUDE_OPUS_4_7), + Some(opus_4_7_thinking_params()), + )) + .await; +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_adaptive_thinking_streaming_smoke() { + let client = anthropic::Client::from_env().expect("client should build"); + reasoning::run_reasoning_roundtrip_streaming(ReasoningRoundtripAgent::new( + client.completion_model(CLAUDE_OPUS_4_7), + Some(opus_4_7_thinking_params()), + )) + .await; +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_adaptive_thinking_tool_roundtrip_smoke() { + let call_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + 
.agent(CLAUDE_OPUS_4_7) + .preamble(reasoning::TOOL_SYSTEM_PROMPT) + .max_tokens(16384) + .tool(WeatherTool::new(call_count.clone())) + .additional_params(opus_4_7_thinking_params()) + .build(); + + let result = agent + .chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new()) + .await + .expect("adaptive thinking tool chat should succeed"); + + reasoning::assert_nonstreaming_universal(&result, &call_count, "anthropic"); +} + +#[tokio::test] +#[ignore = "requires ANTHROPIC_API_KEY"] +async fn messages_adaptive_thinking_streaming_tool_roundtrip_smoke() { + let call_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); + let client = anthropic::Client::from_env().expect("client should build"); + let agent = client + .agent(CLAUDE_OPUS_4_7) + .preamble(reasoning::TOOL_SYSTEM_PROMPT) + .max_tokens(16384) + .tool(WeatherTool::new(call_count.clone())) + .additional_params(opus_4_7_thinking_params()) + .build(); + + let stream = agent + .stream_chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new()) + .multi_turn(3) + .await; + + let stats = reasoning::collect_stream_stats(stream, "anthropic").await; + reasoning::assert_universal(&stats, &call_count, "anthropic"); +} diff --git a/rig/rig-core/tests/openai/gpt_5_5.rs b/rig/rig-core/tests/openai/gpt_5_5.rs new file mode 100644 index 000000000..830890538 --- /dev/null +++ b/rig/rig-core/tests/openai/gpt_5_5.rs @@ -0,0 +1,461 @@ +//! Dedicated GPT-5.5 live smoke tests. 
+ +use base64::{Engine, prelude::BASE64_STANDARD}; +use rig::client::{CompletionClient, ProviderClient}; +use rig::completion::message::Image; +use rig::completion::{Chat, Message}; +use rig::completion::{Prompt, TypedPrompt}; +use rig::message::{DocumentSourceKind, ImageDetail, ImageMediaType}; +use rig::providers::openai; +use rig::streaming::{StreamingChat, StreamingPrompt}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "websocket")] +use rig::completion::CompletionModel; +#[cfg(feature = "websocket")] +use rig::providers::openai::responses_api::websocket::ResponsesWebSocketEvent; + +use crate::reasoning::{self, ReasoningRoundtripAgent, WeatherTool}; +use crate::support::{ + Adder, BASIC_PREAMBLE, BASIC_PROMPT, EXTRACTOR_TEXT, IMAGE_FIXTURE_PATH, STREAMING_PREAMBLE, + STREAMING_PROMPT, STREAMING_TOOLS_PREAMBLE, STREAMING_TOOLS_PROMPT, SmokePerson, + SmokeStructuredOutput, Subtract, TOOLS_PREAMBLE, TOOLS_PROMPT, + assert_contains_any_case_insensitive, assert_mentions_expected_number, + assert_nonempty_response, assert_smoke_structured_output, collect_stream_final_response, +}; + +#[derive(Debug, Deserialize, JsonSchema, Serialize)] +struct Gpt55Event { + title: String, + category: String, + summary: String, +} + +fn gpt_5_5_reasoning_params() -> serde_json::Value { + serde_json::json!({ + "reasoning": { "effort": "xhigh" } + }) +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_prompt_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble(BASIC_PREAMBLE) + .build(); + + let response = agent + .prompt(BASIC_PROMPT) + .await + .expect("prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_streaming_prompt_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client + 
.agent(openai::GPT_5_5) + .preamble(STREAMING_PREAMBLE) + .build(); + + let mut stream = agent.stream_prompt(STREAMING_PROMPT).await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("streaming prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_tools_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble(TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let response = agent + .prompt(TOOLS_PROMPT) + .await + .expect("tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_streaming_tools_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble(STREAMING_TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let mut stream = agent + .stream_prompt(STREAMING_TOOLS_PROMPT) + .multi_turn(3) + .await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("streaming tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_structured_output_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client.agent(openai::GPT_5_5).build(); + + let response: Gpt55Event = agent + .prompt_typed("Return a concise event object for a local Rust meetup in Seattle.") + .await + .expect("typed prompt should succeed"); + + assert_nonempty_response(&response.title); + assert_nonempty_response(&response.category); + assert_nonempty_response(&response.summary); + + let agent = client + .agent(openai::GPT_5_5) + .output_schema::<SmokeStructuredOutput>() + .build(); + let response = agent + .prompt("Return a concise event 
object for a local Rust meetup in Seattle.") + .await + .expect("output schema prompt should succeed"); + let structured: SmokeStructuredOutput = + serde_json::from_str(&response).expect("structured output should deserialize"); + assert_smoke_structured_output(&structured); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_extractor_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let extractor = client.extractor::<SmokePerson>(openai::GPT_5_5).build(); + + let response = extractor + .extract_with_usage(EXTRACTOR_TEXT) + .await + .expect("extractor request should succeed"); + + assert_nonempty_response( + response + .data + .first_name + .as_deref() + .expect("first name should be present"), + ); + assert_nonempty_response( + response + .data + .last_name + .as_deref() + .expect("last name should be present"), + ); + assert!(response.usage.total_tokens > 0, "usage should be populated"); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_image_input_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble("You are an image describer.") + .build(); + let image_bytes = std::fs::read(IMAGE_FIXTURE_PATH).expect("fixture image should be readable"); + let image = Image { + data: DocumentSourceKind::base64(&BASE64_STANDARD.encode(image_bytes)), + media_type: Some(ImageMediaType::JPEG), + detail: Some(ImageDetail::Auto), + ..Default::default() + }; + + let response = agent + .prompt(image) + .await + .expect("image prompt should succeed"); + + assert_nonempty_response(&response); + assert_contains_any_case_insensitive(&response, &["ant", "insect"]); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_reasoning_nonstreaming_smoke() { + let client = openai::Client::from_env().expect("client should build"); + reasoning::run_reasoning_roundtrip_nonstreaming(ReasoningRoundtripAgent::new( 
+ client.completion_model(openai::GPT_5_5), + Some(gpt_5_5_reasoning_params()), + )) + .await; +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_reasoning_streaming_smoke() { + let client = openai::Client::from_env().expect("client should build"); + reasoning::run_reasoning_roundtrip_streaming(ReasoningRoundtripAgent::new( + client.completion_model(openai::GPT_5_5), + Some(gpt_5_5_reasoning_params()), + )) + .await; +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_reasoning_tool_roundtrip_smoke() { + let call_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble(reasoning::TOOL_SYSTEM_PROMPT) + .max_tokens(4096) + .tool(WeatherTool::new(call_count.clone())) + .additional_params(gpt_5_5_reasoning_params()) + .build(); + + let result = agent + .chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new()) + .await + .expect("reasoning tool chat should succeed"); + + reasoning::assert_nonstreaming_universal(&result, &call_count, "openai"); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn responses_reasoning_streaming_tool_roundtrip_smoke() { + let call_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); + let client = openai::Client::from_env().expect("client should build"); + let agent = client + .agent(openai::GPT_5_5) + .preamble(reasoning::TOOL_SYSTEM_PROMPT) + .max_tokens(4096) + .tool(WeatherTool::new(call_count.clone())) + .additional_params(gpt_5_5_reasoning_params()) + .build(); + + let stream = agent + .stream_chat(reasoning::TOOL_USER_PROMPT, Vec::<Message>::new()) + .multi_turn(3) + .await; + + let stats = reasoning::collect_stream_stats(stream, "openai").await; + reasoning::assert_universal(&stats, &call_count, "openai"); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_prompt_smoke() { + let client 
= openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .preamble(BASIC_PREAMBLE) + .build(); + + let response = agent + .prompt(BASIC_PROMPT) + .await + .expect("chat completions prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_streaming_prompt_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .preamble(STREAMING_PREAMBLE) + .build(); + + let mut stream = agent.stream_prompt(STREAMING_PROMPT).await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("chat completions streaming prompt should succeed"); + + assert_nonempty_response(&response); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_tools_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .preamble(TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let response = agent + .prompt(TOOLS_PROMPT) + .await + .expect("chat completions tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_streaming_tools_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .preamble(STREAMING_TOOLS_PREAMBLE) + .tool(Adder) + .tool(Subtract) + .build(); + + let mut stream = agent.stream_prompt(STREAMING_TOOLS_PROMPT).await; + let response = collect_stream_final_response(&mut stream) + .await + .expect("chat completions streaming tool prompt should succeed"); + + assert_mentions_expected_number(&response, -3); +} + +#[tokio::test] +#[ignore = "requires 
OPENAI_API_KEY"] +async fn chat_completions_structured_output_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .output_schema::<SmokeStructuredOutput>() + .build(); + + let response = agent + .prompt("Return a concise event object for a local Rust meetup in Seattle.") + .await + .expect("chat completions output schema prompt should succeed"); + let structured: SmokeStructuredOutput = + serde_json::from_str(&response).expect("structured output should deserialize"); + + assert_smoke_structured_output(&structured); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_extractor_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let extractor = client.extractor::<SmokePerson>(openai::GPT_5_5).build(); + + let response = extractor + .extract_with_usage(EXTRACTOR_TEXT) + .await + .expect("chat completions extractor request should succeed"); + + assert_nonempty_response( + response + .data + .first_name + .as_deref() + .expect("first name should be present"), + ); + assert_nonempty_response( + response + .data + .last_name + .as_deref() + .expect("last name should be present"), + ); + assert!(response.usage.total_tokens > 0, "usage should be populated"); +} + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn chat_completions_image_input_smoke() { + let client = openai::Client::from_env() + .expect("client should build") + .completions_api(); + let agent = client + .agent(openai::GPT_5_5) + .preamble("You are an image describer.") + .build(); + let image_bytes = std::fs::read(IMAGE_FIXTURE_PATH).expect("fixture image should be readable"); + let image = Image { + data: DocumentSourceKind::base64(&BASE64_STANDARD.encode(image_bytes)), + media_type: Some(ImageMediaType::JPEG), + detail: Some(ImageDetail::Auto), + ..Default::default() + }; + + let response = agent + .prompt(image) + .await + 
.expect("chat completions image prompt should succeed"); + + assert_nonempty_response(&response); + assert_contains_any_case_insensitive(&response, &["ant", "insect"]); +} + +#[cfg(feature = "websocket")] +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY and --features websocket"] +async fn responses_websocket_smoke() -> anyhow::Result<()> { + let client = openai::Client::from_env().expect("client should build"); + let model = client.completion_model(openai::GPT_5_5); + let mut session = client.responses_websocket(openai::GPT_5_5).await?; + + let request = model + .completion_request("Explain one benefit of websocket mode in one sentence.") + .build(); + session.send(request).await?; + + let mut streamed_text = String::new(); + loop { + match session.next_event().await? { + ResponsesWebSocketEvent::Item(item) => { + if let rig::providers::openai::responses_api::streaming::ItemChunkKind::OutputTextDelta(delta) = + item.data + { + streamed_text.push_str(&delta.delta); + } + } + ResponsesWebSocketEvent::Response(chunk) => { + if matches!( + chunk.kind, + rig::providers::openai::responses_api::streaming::ResponseChunkKind::ResponseCompleted + | rig::providers::openai::responses_api::streaming::ResponseChunkKind::ResponseFailed + | rig::providers::openai::responses_api::streaming::ResponseChunkKind::ResponseIncomplete + ) { + break; + } + } + ResponsesWebSocketEvent::Done(_) => {} + ResponsesWebSocketEvent::Error(error) => return Err(anyhow::anyhow!(error.to_string())), + } + } + + assert_nonempty_response(&streamed_text); + session.close().await?; + Ok(()) +} diff --git a/rig/rig-core/tests/openai/image_generation.rs b/rig/rig-core/tests/openai/image_generation.rs index 4a0d06e39..9a6cdfc1b 100644 --- a/rig/rig-core/tests/openai/image_generation.rs +++ b/rig/rig-core/tests/openai/image_generation.rs @@ -24,3 +24,25 @@ async fn image_generation_smoke() { assert_nonempty_bytes(&response.image); } + +#[tokio::test] +#[ignore = "requires OPENAI_API_KEY"] +async fn 
gpt_image_2_image_generation_smoke() { + let client = openai::Client::from_env().expect("client should build"); + let model = client.image_generation_model(openai::GPT_IMAGE_2); + + let response = model + .image_generation_request() + .prompt(IMAGE_PROMPT) + .width(1024) + .height(1024) + .send() + .await + .expect("gpt-image-2 image generation should succeed"); + + assert_nonempty_bytes(&response.image); + + let output_path = std::env::temp_dir().join("rig-openai-gpt-image-2-smoke.png"); + std::fs::write(&output_path, &response.image).expect("generated image should save to disk"); + println!("saved generated image to {}", output_path.display()); +} diff --git a/rig/rig-core/tests/openai/mod.rs b/rig/rig-core/tests/openai/mod.rs index 3b27127a2..a3311b982 100644 --- a/rig/rig-core/tests/openai/mod.rs +++ b/rig/rig-core/tests/openai/mod.rs @@ -4,6 +4,7 @@ mod audio_generation; mod completions_api; mod extractor; mod extractor_usage; +mod gpt_5_5; #[cfg(feature = "image")] mod image_generation; mod models;