diff --git a/.stats.yml b/.stats.yml
index aac5deaf..6153f6ab 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 74
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-04a478c28ccfda001301fb0066e8155fead5a9d42fa31fcd9f6ccbf49add8566.yml
-openapi_spec_hash: f7276d5ac16bebb9e105fe4e065d1e96
-config_hash: 52d213100a0ca1a4b2cdcd2718936b51
+configured_endpoints: 75
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-19148ebeed55db4be0c1c60d31c81306386c4c71a2aee8d1eeac8ebfa15c1168.yml
+openapi_spec_hash: 7bc7e54a7356083f1fb639734be306b1
+config_hash: 279a64a8df3e6dba241bfa1e18a04f69
diff --git a/README.md b/README.md
index 2356a6b2..fe86a4f8 100644
--- a/README.md
+++ b/README.md
@@ -471,62 +471,64 @@ with Together() as client:
```bash
# Help
-together files --help
+tg files --help
# Check file
-together files check example.jsonl
+tg files check example.jsonl
# Upload file
-together files upload example.jsonl
+tg files upload example.jsonl
# List files
-together files list
+tg files list
# Retrieve file metadata
-together files retrieve file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files retrieve file-6f50f9d1-5b95-416c-9040-0799b2b4b894
# Retrieve file content
-together files retrieve-content file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files retrieve-content file-6f50f9d1-5b95-416c-9040-0799b2b4b894
# Delete remote file
-together files delete file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files delete file-6f50f9d1-5b95-416c-9040-0799b2b4b894
```
### Fine-tuning
```bash
+# `tg ft` and `tg fine-tuning` are equivalent
+
# Help
-together fine-tuning --help
+tg ft --help
# Create fine-tune job
-together fine-tuning create \
+tg ft create \
--model togethercomputer/llama-2-7b-chat \
--training-file file-711d8724-b3e3-4ae2-b516-94841958117d
# List fine-tune jobs
-together fine-tuning list
+tg ft list
# Retrieve fine-tune job details
-together fine-tuning retrieve ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft retrieve ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# List fine-tune job events
-together fine-tuning list-events ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft list-events ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# Cancel running job
-together fine-tuning cancel ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft cancel ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# Download fine-tuned model weights
-together fine-tuning download ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft download ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
```
### Models
```bash
# Help
-together models --help
+tg models --help
# List models
-together models list
+tg models list
```
## Versioning
@@ -562,201 +564,203 @@ Python 3.9 or higher.
```bash
# Help
-together files --help
+tg files --help
# Check file
-together files check example.jsonl
+tg files check example.jsonl
# Upload file
-together files upload example.jsonl
+tg files upload example.jsonl
# List files
-together files list
+tg files list
# Retrieve file metadata
-together files retrieve file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files retrieve file-6f50f9d1-5b95-416c-9040-0799b2b4b894
# Retrieve file content
-together files retrieve-content file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files retrieve-content file-6f50f9d1-5b95-416c-9040-0799b2b4b894
# Delete remote file
-together files delete file-6f50f9d1-5b95-416c-9040-0799b2b4b894
+tg files delete file-6f50f9d1-5b95-416c-9040-0799b2b4b894
```
### Fine-tuning
```bash
+# `tg ft` and `tg fine-tuning` are equivalent
+
# Help
-together fine-tuning --help
+tg ft --help
# Create fine-tune job
-together fine-tuning create \
+tg ft create \
--model togethercomputer/llama-2-7b-chat \
--training-file file-711d8724-b3e3-4ae2-b516-94841958117d
# List fine-tune jobs
-together fine-tuning list
+tg ft list
# Retrieve fine-tune job details
-together fine-tuning retrieve ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft retrieve ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# List fine-tune job events
-together fine-tuning list-events ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft list-events ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# List fine-tune checkpoints
-together fine-tuning list-checkpoints ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft list-checkpoints ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# Cancel running job
-together fine-tuning cancel ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft cancel ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# Download fine-tuned model weights
-together fine-tuning download ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft download ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
# Delete fine-tuned model weights
-together fine-tuning delete ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
+tg ft delete ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b
```
### Models
```bash
# Help
-together models --help
+tg models --help
# List models
-together models list
+tg models list
# Upload a model
-together models upload --model-name my-org/my-model --model-source s3-or-hugging-face
+tg models upload --model-name my-org/my-model --model-source s3-or-hugging-face
```
### Clusters
```bash
# Help
-together beta clusters --help
+tg beta clusters --help
# Create a cluster
-together beta clusters create
+tg beta clusters create
# List clusters
-together beta clusters list
+tg beta clusters list
# Retrieve cluster details
-together beta clusters retrieve [cluster-id]
+tg beta clusters retrieve [cluster-id]
# Update a cluster
-together beta clusters update [cluster-id]
+tg beta clusters update [cluster-id]
# Retrieve Together cluster configuration options such as regions, gpu types and drivers available
-together beta clusters list-regions
+tg beta clusters list-regions
```
##### Cluster Storage
```bash
# Help
-together beta clusters storage --help
+tg beta clusters storage --help
# Create cluster storage volume
-together beta clusters storage create
+tg beta clusters storage create
# List storage volumes
-together beta clusters storage list
+tg beta clusters storage list
# Retrieve storage volume
-together beta clusters storage retrieve [storage-id]
+tg beta clusters storage retrieve [storage-id]
# Delete storage volume
-together beta clusters storage delete [storage-id]
+tg beta clusters storage delete [storage-id]
```
### Jig (Container Deployments)
```bash
# Help
-together beta jig --help
+tg beta jig --help
# Initialize jig configuration (creates pyproject.toml)
-together beta jig init
+tg beta jig init
# Generate Dockerfile from config
-together beta jig dockerfile
+tg beta jig dockerfile
# Build container image
-together beta jig build
-together beta jig build --tag v1.0 --warmup
+tg beta jig build
+tg beta jig build --tag v1.0 --warmup
# Push image to registry
-together beta jig push
-together beta jig push --tag v1.0
+tg beta jig push
+tg beta jig push --tag v1.0
# Deploy model (builds, pushes, and deploys)
-together beta jig deploy
-together beta jig deploy --build-only
-together beta jig deploy --image existing-image:tag
+tg beta jig deploy
+tg beta jig deploy --build-only
+tg beta jig deploy --image existing-image:tag
# Get deployment status
-together beta jig status
+tg beta jig status
# Get deployment endpoint URL
-together beta jig endpoint
+tg beta jig endpoint
# View deployment logs
-together beta jig logs
-together beta jig logs --follow
+tg beta jig logs
+tg beta jig logs --follow
# Destroy deployment
-together beta jig destroy
+tg beta jig destroy
# Get queue metrics
-together beta jig queue-status
+tg beta jig queue-status
# List all deployments
-together beta jig list
+tg beta jig list
```
##### Jig Secrets
```bash
# Help
-together beta jig secrets --help
+tg beta jig secrets --help
# Set a secret (creates or updates)
-together beta jig secrets set --name MY_SECRET --value "secret-value"
+tg beta jig secrets set --name MY_SECRET --value "secret-value"
# Remove a secret from local state
-together beta jig secrets unset --name MY_SECRET
+tg beta jig secrets unset --name MY_SECRET
# List all secrets with sync status
-together beta jig secrets list
+tg beta jig secrets list
```
##### Jig Volumes
```bash
# Help
-together beta jig volumes --help
+tg beta jig volumes --help
# Create a volume and upload files from directory
-together beta jig volumes create --name my-volume --source ./data
+tg beta jig volumes create --name my-volume --source ./data
# Update a volume with new files
-together beta jig volumes update --name my-volume --source ./data
+tg beta jig volumes update --name my-volume --source ./data
# Set volume mount path for deployment
-together beta jig volumes set --name my-volume --mount-path /app/data
+tg beta jig volumes set --name my-volume --mount-path /app/data
# Remove volume from deployment config (does not delete remote volume)
-together beta jig volumes unset --name my-volume
+tg beta jig volumes unset --name my-volume
# Delete a volume
-together beta jig volumes delete --name my-volume
+tg beta jig volumes delete --name my-volume
# Describe a volume
-together beta jig volumes describe --name my-volume
+tg beta jig volumes describe --name my-volume
# List all volumes
-together beta jig volumes list
+tg beta jig volumes list
```
## Contributing
diff --git a/api.md b/api.md
index c09afbe4..6c4ca03e 100644
--- a/api.md
+++ b/api.md
@@ -184,6 +184,7 @@ from together.types import (
FineTuningEstimatePriceResponse,
FineTuningListCheckpointsResponse,
FineTuningListEventsResponse,
+ FineTuningListMetricsResponse,
)
```
@@ -197,6 +198,7 @@ Methods:
- client.fine_tuning.estimate_price(\*\*params) -> FineTuningEstimatePriceResponse
- client.fine_tuning.list_checkpoints(id) -> FineTuningListCheckpointsResponse
- client.fine_tuning.list_events(id) -> FineTuningListEventsResponse
+- client.fine_tuning.list_metrics(id) -> FineTuningListMetricsResponse
# CodeInterpreter
diff --git a/pyproject.toml b/pyproject.toml
index 292341c2..c41f9aa4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,6 +26,7 @@ dependencies = [
"types-pyyaml>=6.0.12.20250915",
"tomli>=2.0.0; python_version < '3.11'",
"detect-agent>=0.2.0",
+ "asciichartpy>=0.7.0",
]
requires-python = ">= 3.10"
diff --git a/src/together/_qs.py b/src/together/_qs.py
index de8c99bc..4127c19c 100644
--- a/src/together/_qs.py
+++ b/src/together/_qs.py
@@ -2,17 +2,13 @@
from typing import Any, List, Tuple, Union, Mapping, TypeVar
from urllib.parse import parse_qs, urlencode
-from typing_extensions import Literal, get_args
+from typing_extensions import get_args
-from ._types import NotGiven, not_given
+from ._types import NotGiven, ArrayFormat, NestedFormat, not_given
from ._utils import flatten
_T = TypeVar("_T")
-
-ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
-NestedFormat = Literal["dots", "brackets"]
-
PrimitiveData = Union[str, int, float, bool, None]
# this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"]
# https://github.com/microsoft/pyright/issues/3555
diff --git a/src/together/_types.py b/src/together/_types.py
index cf3a156f..470d93c0 100644
--- a/src/together/_types.py
+++ b/src/together/_types.py
@@ -47,6 +47,9 @@
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
+ArrayFormat = Literal["comma", "repeat", "indices", "brackets"]
+NestedFormat = Literal["dots", "brackets"]
+
# Approximates httpx internal ProxiesTypes and RequestFiles types
# while adding support for `PathLike` instances
diff --git a/src/together/_utils/_utils.py b/src/together/_utils/_utils.py
index e772856c..8e12a1aa 100644
--- a/src/together/_utils/_utils.py
+++ b/src/together/_utils/_utils.py
@@ -17,11 +17,11 @@
)
from pathlib import Path
from datetime import date, datetime
-from typing_extensions import TypeGuard
+from typing_extensions import TypeGuard, get_args
import sniffio
-from .._types import Omit, NotGiven, FileTypes, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, ArrayFormat, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -40,25 +40,45 @@ def extract_files(
query: Mapping[str, object],
*,
paths: Sequence[Sequence[str]],
+ array_format: ArrayFormat = "brackets",
) -> list[tuple[str, FileTypes]]:
"""Recursively extract files from the given dictionary based on specified paths.
A path may look like this ['foo', 'files', '<array>', 'data'].
+    ``array_format`` controls how ``<array>`` segments contribute to the emitted
+ field name. Supported values: ``"brackets"`` (``foo[]``), ``"repeat"`` and
+ ``"comma"`` (``foo``), ``"indices"`` (``foo[0]``, ``foo[1]``).
+
Note: this mutates the given dictionary.
"""
files: list[tuple[str, FileTypes]] = []
for path in paths:
- files.extend(_extract_items(query, path, index=0, flattened_key=None))
+ files.extend(_extract_items(query, path, index=0, flattened_key=None, array_format=array_format))
return files
+def _array_suffix(array_format: ArrayFormat, array_index: int) -> str:
+ if array_format == "brackets":
+ return "[]"
+ if array_format == "indices":
+ return f"[{array_index}]"
+ if array_format == "repeat" or array_format == "comma":
+ # Both repeat the bare field name for each file part; there is no
+ # meaningful way to comma-join binary parts.
+ return ""
+ raise NotImplementedError(
+ f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}"
+ )
+
+
def _extract_items(
obj: object,
path: Sequence[str],
*,
index: int,
flattened_key: str | None,
+ array_format: ArrayFormat,
) -> list[tuple[str, FileTypes]]:
try:
key = path[index]
@@ -75,9 +95,11 @@ def _extract_items(
if is_list(obj):
files: list[tuple[str, FileTypes]] = []
- for entry in obj:
- assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
- files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ for array_index, entry in enumerate(obj):
+ suffix = _array_suffix(array_format, array_index)
+ emitted_key = (flattened_key + suffix) if flattened_key else suffix
+ assert_is_file_content(entry, key=emitted_key)
+ files.append((emitted_key, cast(FileTypes, entry)))
return files
assert_is_file_content(obj, key=flattened_key)
@@ -106,6 +128,7 @@ def _extract_items(
path,
index=index,
flattened_key=flattened_key,
+ array_format=array_format,
)
elif is_list(obj):
        if key != "<array>":
@@ -117,9 +140,12 @@ def _extract_items(
item,
path,
index=index,
- flattened_key=flattened_key + "[]" if flattened_key is not None else "[]",
+ flattened_key=(
+ (flattened_key if flattened_key is not None else "") + _array_suffix(array_format, array_index)
+ ),
+ array_format=array_format,
)
- for item in obj
+ for array_index, item in enumerate(obj)
]
)
diff --git a/src/together/lib/cli/__init__.py b/src/together/lib/cli/__init__.py
index cda5d806..9cd0c2fe 100644
--- a/src/together/lib/cli/__init__.py
+++ b/src/together/lib/cli/__init__.py
@@ -6,7 +6,6 @@
from typing import Optional, Annotated, get_args, get_origin
import httpx
-import detect_agent
from cyclopts import App, Group, Parameter, CycloptsError, MissingArgumentError
from rich.markup import escape as escape_rich_markup
@@ -281,7 +280,7 @@ async def run_command() -> None:
## Files API commands
files_app = app.command(App(name="files", help="File API commands"))
files_app.command(f"{_CLI}.files.upload:upload", help="Upload files for fine-tuning, evals, etc.")
-files_app.command(f"{_CLI}.files.list:list", help="List files on the Together platform")
+files_app.command(f"{_CLI}.files.list:list", alias="ls", help="List files on the Together platform")
files_app.command(f"{_CLI}.files.retrieve:retrieve", help="Retrieve metadata for a file from the Together platform")
files_app.command(
f"{_CLI}.files.retrieve_content:retrieve_content", help="Download the contents of a file from the Together platform"
@@ -290,9 +289,11 @@ async def run_command() -> None:
files_app.command(f"{_CLI}.files.check:check", help="Check a local file for issues")
# Fine-tuning API commands
-fine_tuning_app = app.command(App(name="fine-tuning", help="Fine-tuning API commands"))
+fine_tuning_app = app.command(App(name="fine-tuning", help="Fine-tuning API commands", alias="ft"))
fine_tuning_app.command((f"{_CLI}.fine_tuning.create:create"), help="Start a new fine-tuning job")
-fine_tuning_app.command((f"{_CLI}.fine_tuning.list:list"), help="List fine-tuning jobs on the Together platform")
+fine_tuning_app.command(
+ (f"{_CLI}.fine_tuning.list:list"), alias="ls", help="List fine-tuning jobs on the Together platform"
+)
fine_tuning_app.command(
(f"{_CLI}.fine_tuning.retrieve:retrieve"), help="Retrieve metadata for a fine-tuning job from the Together platform"
)
@@ -311,10 +312,14 @@ async def run_command() -> None:
fine_tuning_app.command(
(f"{_CLI}.fine_tuning.delete:delete"), help="Delete a fine-tuning job from the Together platform"
)
+fine_tuning_app.command(
+ (f"{_CLI}.fine_tuning.get_metrics:get_metrics"), help="Retrieve training metrics for a fine-tuning job"
+)
+
## Models API commands
models_app = app.command(App(name="models", help="Models API commands"))
-models_app.command((f"{_CLI}.models.list:list"), help="List models on the Together platform")
+models_app.command((f"{_CLI}.models.list:list"), alias="ls", help="List models on the Together platform")
models_app.command((f"{_CLI}.models.upload:upload"), help="Upload a model to the Together platform")
## Endpoints API commands
@@ -329,7 +334,7 @@ async def run_command() -> None:
endpoints_app.command((f"{_CLI}.endpoints.stop:stop"), help="Stop an endpoint")
endpoints_app.command((f"{_CLI}.endpoints.start:start"), help="Start an endpoint")
endpoints_app.command((f"{_CLI}.endpoints.delete:delete"), help="Delete an endpoint from the Together platform")
-endpoints_app.command((f"{_CLI}.endpoints.list:list"), help="List endpoints on the Together platform")
+endpoints_app.command((f"{_CLI}.endpoints.list:list"), alias="ls", help="List endpoints on the Together platform")
endpoints_app.command((f"{_CLI}.endpoints.update:update"), help="Update an endpoint on the Together platform")
endpoints_app.command(
(f"{_CLI}.endpoints.availability_zones:availability_zones"), help="List availability zones for deploying models"
@@ -338,7 +343,7 @@ async def run_command() -> None:
## Evals API commands
evals_app = app.command(App(name="evals", help="Evals API commands"))
evals_app.command((f"{_CLI}.evals.create:create"), help="Create a new eval job")
-evals_app.command((f"{_CLI}.evals.list:list"), help="List eval jobs on the Together platform")
+evals_app.command((f"{_CLI}.evals.list:list"), alias="ls", help="List eval jobs on the Together platform")
evals_app.command(
(f"{_CLI}.evals.retrieve:retrieve"), help="Retrieve metadata for an eval job from the Together platform"
)
@@ -358,7 +363,7 @@ async def run_command() -> None:
### Clusters API commands
clusters_app = beta_app.command(App(name="clusters", help="Clusters API commands"))
-clusters_app.command((f"{_CLI}.beta.clusters.list:list"), help="List clusters on the Together platform")
+clusters_app.command((f"{_CLI}.beta.clusters.list:list"), alias="ls", help="List clusters on the Together platform")
clusters_app.command((f"{_CLI}.beta.clusters.create:create"), help="Create a new cluster")
clusters_app.command(
(f"{_CLI}.beta.clusters.retrieve:retrieve"), help="Retrieve metadata for a cluster from the Together platform"
@@ -370,7 +375,7 @@ async def run_command() -> None:
### Clusters > Storage API commands
storage_app = clusters_app.command(App(name="storage", help="Clusters Storage API commands", group="Subcommands"))
-storage_app.command((f"{_CLI}.beta.clusters.storage.list:list"), help="List storage volumes for a cluster")
+storage_app.command((f"{_CLI}.beta.clusters.storage.list:list"), alias="ls", help="List storage volumes for a cluster")
storage_app.command((f"{_CLI}.beta.clusters.storage.create:create"), help="Create a new storage volume for a cluster")
storage_app.command(
(f"{_CLI}.beta.clusters.storage.retrieve:retrieve"),
@@ -398,7 +403,7 @@ async def run_command() -> None:
jig_app.command(
(f"{_CLI}.beta.jig.jig:queue_status_cli"), name="queue-status", help="Get queue metrics for the deployment"
)
-jig_app.command((f"{_CLI}.beta.jig.jig:list_deployments_cli"), name="list", help="List all deployments")
+jig_app.command((f"{_CLI}.beta.jig.jig:list_deployments_cli"), name="list", alias="ls", help="List all deployments")
secrets_app = jig_app.command(App(name="secrets", help="Manage deployment secrets", group="Subcommands"))
secrets_app.command((f"{_CLI}.beta.jig.jig:secrets_set_cli"), name="set", help="Set a secret (create or update)")
@@ -406,7 +411,9 @@ async def run_command() -> None:
secrets_app.command(
(f"{_CLI}.beta.jig.jig:secrets_delete_cli"), name="delete", help="Delete a secret and unset it locally"
)
-secrets_app.command((f"{_CLI}.beta.jig.jig:secrets_list_cli"), name="list", help="List all secrets with sync status")
+secrets_app.command(
+ (f"{_CLI}.beta.jig.jig:secrets_list_cli"), name="list", alias="ls", help="List all secrets with sync status"
+)
### Jig > volumes
storage_app = jig_app.command(App(name="volumes", help="Jig Volumes API commands", group="Subcommands"))
@@ -424,14 +431,16 @@ async def run_command() -> None:
name="describe",
help="Retrieve metadata for a volume from the Together platform",
)
-storage_app.command((f"{_CLI}.beta.jig.jig:jig_volumes_list"), name="list", help="List volumes for a JIG deployment")
+storage_app.command(
+ (f"{_CLI}.beta.jig.jig:jig_volumes_list"), name="list", alias="ls", help="List volumes for a JIG deployment"
+)
def main() -> None:
install_completion(app)
# Shown in the root help page, but not a functional command
- BETA_GROUP_TITLE = "Beta Commands" if detect_agent.determine_agent()["is_agent"] else "⚠️ "
+ BETA_GROUP_TITLE = "Beta Commands"
app.command(App(name="beta clusters", help="Create and manage GPU clusters", group=BETA_GROUP_TITLE))
app.command(App(name="beta jig", help="Container deployment", group=BETA_GROUP_TITLE))
beta_root_app.show = False
diff --git a/src/together/lib/cli/_track_cli.py b/src/together/lib/cli/_track_cli.py
index 003a93cd..04c9355d 100644
--- a/src/together/lib/cli/_track_cli.py
+++ b/src/together/lib/cli/_track_cli.py
@@ -196,12 +196,30 @@ def _legacy_command_before_first_option(tokens: list[str]) -> tuple[str, bool]:
return (" ".join(parts), is_beta_command)
+# First subcommand token only (alias -> primary name) for stable telemetry.
+_TELEMETRY_SUBCOMMAND_ALIASES: dict[str, str] = {"ft": "fine-tuning"}
+
+
+def _canonical_telemetry_command(cmd: str) -> str:
+ if not cmd:
+ return cmd
+ parts = cmd.split()
+ primary = _TELEMETRY_SUBCOMMAND_ALIASES.get(parts[0])
+ if primary is not None:
+ parts[0] = primary
+ parts = ["list" if p == "ls" else p for p in parts]
+ return " ".join(parts)
+
+
def parse_command_and_flags(app: App, tokens: list[str]) -> tuple[str, list[str], bool]:
"""
Return telemetry-safe command path (registered subcommands only), argument *names* from
cyclopts resolution (including positional parameters — values are never returned), and
whether the invocation is under ``beta``.
+ Subcommand aliases (e.g. ``ft``) are normalized to their primary names (e.g. ``fine-tuning``).
+ The ``list`` alias ``ls`` is normalized to ``list`` in the returned command path.
+
Requires the root cyclopts :class:`~cyclopts.App` so positional values are not mistaken
    for subcommand tokens (e.g. ``beta jig secrets set <name> <value>``).
"""
@@ -227,14 +245,39 @@ def parse_command_and_flags(app: App, tokens: list[str]) -> tuple[str, list[str]
except CycloptsError:
explicit_args.extend(_long_option_names_in_tokens(rest_after_chain))
- return (parsed_command, explicit_args, is_beta_command)
+ return (_canonical_telemetry_command(parsed_command), explicit_args, is_beta_command)
-def sanitize_cli_error_message(msg: str) -> str:
- """Sanitize the error messages caught for telemetry to remove sensitive information."""
- s = msg.strip()
- if len(s) > _ERROR_MESSAGE_MAX_LEN:
- s = s[:_ERROR_MESSAGE_MAX_LEN] + "…"
+def _redact_secrets_in_error_text(s: str) -> str:
+ """Apply secret redaction patterns to error text (run before length truncation)."""
+ # `https://user:pass@host/...` and `http://...`
+ s = re.sub(
+ r"(?i)(https?://)([^:/?#\s]+):([^@]+)@",
+        r"\1<redacted>:<redacted>@",
+ s,
+ )
+ # Query / fragment: `?token=...`, `&api_key=...`, etc.
+ s = re.sub(
+ r"(?i)([?])(?:access_?token|id_?token|refresh_?token|api_?key|apikey|"
+ r"password|passwd|client_?secret|token|secret|credentials)=([^\s]+)",
+ r"\1",
+ s,
+ )
+ # JWT (header typically base64 of `{"` → eyJ; also long JWS / opaque three-part tokens)
+ s = re.sub(
+ r"(?i)\b(eyJ[a-z0-9_-]*\.[a-z0-9_-]*\.[a-z0-9_-]*)\b",
+        "<redacted>",
+ s,
+ )
+ s = re.sub(
+ r"(?i)\b([a-z0-9_-]{20,}\.[a-z0-9_-]{20,}\.[a-z0-9_-]{20,})\b",
+        "<redacted>",
+ s,
+ )
+ # OpenAI / common `sk-…` API keys; Hugging Face `hf_…`; Together-style `tog_…`
+    s = re.sub(r"(?i)(?<![a-z0-9])sk-[a-z0-9_-]{20,}", "<redacted>", s)
+    s = re.sub(r"(?i)(?<![a-z0-9])hf_[a-z0-9_-]{20,}", "<redacted>", s)
+    s = re.sub(r"(?i)(?<![a-z0-9])tog_[a-z0-9_-]{20,}", "<redacted>", s)
    s = re.sub(r"(?i)(bearer\s+)[A-Za-z0-9._\-/+]{20,}", r"\1<redacted>", s)
s = re.sub(
r"(?i)(api[_-]?key\s*[\"':=]\s*|api[_-]?key\s+)([A-Za-z0-9._\-]{20,})",
@@ -242,6 +285,19 @@ def sanitize_cli_error_message(msg: str) -> str:
s,
)
    s = re.sub(r"(?i)(Authorization:\s*)([^\s]+)", r"\1<redacted>", s)
+ s = re.sub(
+ r"(?i)(Basic\s+)([A-Za-z0-9+/=]{8,})",
+ r"\1",
+ s,
+ )
+ return s
+
+
+def sanitize_cli_error_message(msg: str) -> str:
+ """Sanitize the error messages caught for telemetry to remove sensitive information."""
+ s = _redact_secrets_in_error_text(msg.strip())
+ if len(s) > _ERROR_MESSAGE_MAX_LEN:
+ s = s[:_ERROR_MESSAGE_MAX_LEN] + "…"
return s
diff --git a/src/together/lib/cli/api/beta/clusters/_util.py b/src/together/lib/cli/api/beta/clusters/_util.py
index f8b57b7b..6aa9927c 100644
--- a/src/together/lib/cli/api/beta/clusters/_util.py
+++ b/src/together/lib/cli/api/beta/clusters/_util.py
@@ -4,9 +4,11 @@
from together.lib.cli.utils._console import console
from together.lib.cli.components.list import ListTable
+EMPTY_MESSAGE = "You don't have any clusters yet. To create your first cluster run:\n [dim]-[/dim] [primary]tg beta clusters create[/primary]"
+
def print_clusters(clusters: List[Cluster]) -> None:
- table = ListTable()
+ table = ListTable(title="Clusters", empty_message=EMPTY_MESSAGE)
table.add_column("ID", ratio=2)
table.add_primary_column("Name", ratio=2)
table.add_column("Status")
diff --git a/src/together/lib/cli/api/beta/clusters/get_credentials.py b/src/together/lib/cli/api/beta/clusters/get_credentials.py
index 95f0d56c..b1937008 100644
--- a/src/together/lib/cli/api/beta/clusters/get_credentials.py
+++ b/src/together/lib/cli/api/beta/clusters/get_credentials.py
@@ -32,11 +32,12 @@ async def get_credentials(
overwrite_existing: Annotated[
bool,
Parameter(
- help="If there is a conflict with the existing kubeconfig, overwrite the existing kubeconfig instead of raising an error."
+ help="If there is a conflict with the existing kubeconfig, overwrite the existing kubeconfig instead of raising an error.",
+ negative=False,
),
] = False,
set_default_context: Annotated[
- bool, Parameter(help="If true, set the default context to the cluster name.")
+ bool, Parameter(help="If true, set the default context to the cluster name.", negative=False)
] = False,
*,
config: CLIConfigParameter,
diff --git a/src/together/lib/cli/api/beta/clusters/storage/delete.py b/src/together/lib/cli/api/beta/clusters/storage/delete.py
index 8fe4280b..e6cce77c 100644
--- a/src/together/lib/cli/api/beta/clusters/storage/delete.py
+++ b/src/together/lib/cli/api/beta/clusters/storage/delete.py
@@ -7,9 +7,11 @@
from together.lib.cli.components.list import ListTable
from together.lib.cli.components.loader import show_loading_status
+EMPTY_MESSAGE = "You don't have any storage volumes yet. To create your first storage volume run:\n [dim]-[/dim] [primary]tg beta clusters storage create[/primary]"
+
def _print_storage(storage: ClusterStorage) -> None:
- table = ListTable()
+ table = ListTable(title="Cluster Storage", empty_message=EMPTY_MESSAGE)
table.add_primary_column("ID")
table.add_column("Name")
table.add_column("Size")
diff --git a/src/together/lib/cli/api/beta/clusters/storage/list.py b/src/together/lib/cli/api/beta/clusters/storage/list.py
index 95671cc8..7f616aa8 100644
--- a/src/together/lib/cli/api/beta/clusters/storage/list.py
+++ b/src/together/lib/cli/api/beta/clusters/storage/list.py
@@ -7,6 +7,8 @@
from together.lib.cli.components.loader import show_loading_status
from together.lib.cli.utils._mock_pagination import AfterParameter, mock_pagination
+EMPTY_MESSAGE = "You don't have any storage volumes yet. To create your first storage volume run:\n [dim]-[/dim] [primary]tg beta clusters storage create[/primary]"
+
async def list(
after: AfterParameter = None,
@@ -22,7 +24,7 @@ async def list(
console.print_json(openapi_dumps(response).decode("utf-8"))
return
- table = ListTable()
+ table = ListTable(title="Cluster Storage", empty_message=EMPTY_MESSAGE)
table.add_primary_column("ID")
table.add_column("Name")
table.add_column("Size")
diff --git a/src/together/lib/cli/api/beta/jig/jig.py b/src/together/lib/cli/api/beta/jig/jig.py
index 0dde31f8..bab89112 100644
--- a/src/together/lib/cli/api/beta/jig/jig.py
+++ b/src/together/lib/cli/api/beta/jig/jig.py
@@ -1225,7 +1225,8 @@ async def jig_volumes_list(
console.print_json(openapi_dumps(list_resp).decode())
return
- table = ListTable()
+ EMPTY_MESSAGE = "You don't have any volumes yet. To create your first volume run:\n [dim]-[/dim] [primary]tg beta jig volumes create[/primary]"
+ table = ListTable(title="Volumes", empty_message=EMPTY_MESSAGE)
table.add_primary_column("ID")
table.add_column("Name")
diff --git a/src/together/lib/cli/api/endpoints/create.py b/src/together/lib/cli/api/endpoints/create.py
index 4c5f90d4..99e4f44b 100644
--- a/src/together/lib/cli/api/endpoints/create.py
+++ b/src/together/lib/cli/api/endpoints/create.py
@@ -20,10 +20,14 @@
MaxReplicasParameter = Annotated[int, Parameter(help="Maximum number of replicas to deploy (must be >= 0)")]
HardwareParameter = Annotated[Optional[str], Parameter(help="Hardware configuration to use for inference")]
DisplayNameParameter = Annotated[Optional[str], Parameter(help="A human-readable name for the endpoint")]
-NoPromptCacheParameter = Annotated[Optional[bool], Parameter(help="Deprecated and no longer has any effect.")]
-NoSpeculativeDecodingParameter = Annotated[bool, Parameter(help="Disable speculative decoding for this endpoint")]
+NoPromptCacheParameter = Annotated[
+ Optional[bool], Parameter(help="Deprecated and no longer has any effect.", negative=False, show=False)
+]
+NoSpeculativeDecodingParameter = Annotated[
+ bool, Parameter(help="Disable speculative decoding for this endpoint", negative=False)
+]
NoAutoStartParameter = Annotated[
- bool, Parameter(help="Create the endpoint in STOPPED state instead of auto-starting it")
+ bool, Parameter(help="Create the endpoint in STOPPED state instead of auto-starting it", negative=False)
]
InactiveTimeoutParameter = Annotated[
Optional[int],
@@ -34,7 +38,7 @@
AvailabilityZoneParameter = Annotated[
Optional[str], Parameter(help="Start endpoint in specified availability zone (e.g., us-central-4b)")
]
-WaitParameter = Annotated[bool, Parameter(help="Wait for the endpoint to be ready after creation")]
+WaitParameter = Annotated[bool, Parameter(help="Wait for the endpoint to be ready after creation", negative=False)]
@handle_endpoint_api_errors("Endpoints")
diff --git a/src/together/lib/cli/api/endpoints/hardware.py b/src/together/lib/cli/api/endpoints/hardware.py
index c40992bd..308878e3 100644
--- a/src/together/lib/cli/api/endpoints/hardware.py
+++ b/src/together/lib/cli/api/endpoints/hardware.py
@@ -30,7 +30,7 @@ async def hardware(
console.print_json(openapi_dumps(hardware_options.data).decode("utf-8"))
return
- table = ListTable("Hardware")
+ table = ListTable("Hardware", empty_message="No hardware options found")
table.add_primary_column("Hardware ID")
table.add_column("GPU")
table.add_column("Memory")
diff --git a/src/together/lib/cli/api/endpoints/list.py b/src/together/lib/cli/api/endpoints/list.py
index 09b7dbd9..fb598028 100644
--- a/src/together/lib/cli/api/endpoints/list.py
+++ b/src/together/lib/cli/api/endpoints/list.py
@@ -18,9 +18,12 @@
async def list(
_type: Annotated[
Optional[Literal["dedicated", "serverless"]],
- Parameter(name="--type", help="Deprecated and no longer has any effect."),
+ Parameter(name="--type", help="Deprecated and no longer has any effect.", show=False),
+ ] = None,
+ _mine: Annotated[
+ Optional[bool],
+ Parameter(name="--mine", help="Deprecated and no longer has any effect.", negative=False, show=False),
] = None,
- _mine: Annotated[Optional[bool], Parameter(name="--mine", help="Deprecated and no longer has any effect.")] = None,
usage_type: Annotated[
Optional[Literal["on-demand", "reserved"]], Parameter(help="Filter by usage type options")
] = None,
@@ -42,11 +45,8 @@ async def list(
console.print_json(openapi_dumps(endpoints_to_display).decode("utf-8"))
return
- if len(endpoints_to_display) == 0:
- console.print("No dedicated endpoints found")
- return
-
- table = ListTable("Endpoints")
+ EMPTY_MESSAGE = "You don't have any dedicated endpoints yet. To create your first endpoint run:\n [dim]-[/dim] [primary]tg endpoints create[/primary]"
+ table = ListTable("Endpoints", empty_message=EMPTY_MESSAGE)
table.add_primary_column("ID")
table.add_column("Name", ratio=2)
table.add_column("State")
diff --git a/src/together/lib/cli/api/endpoints/start.py b/src/together/lib/cli/api/endpoints/start.py
index 42105f4d..98625b69 100644
--- a/src/together/lib/cli/api/endpoints/start.py
+++ b/src/together/lib/cli/api/endpoints/start.py
@@ -16,7 +16,7 @@ async def start(
str,
Parameter(required=True, help="The ID of the endpoint to start"),
],
- wait: Annotated[bool, Parameter(help="Wait for the endpoint to start")] = False,
+ wait: Annotated[bool, Parameter(help="Wait for the endpoint to start", negative=False)] = False,
*,
config: CLIConfigParameter,
) -> None:
diff --git a/src/together/lib/cli/api/endpoints/stop.py b/src/together/lib/cli/api/endpoints/stop.py
index 33a75a50..d160eb14 100644
--- a/src/together/lib/cli/api/endpoints/stop.py
+++ b/src/together/lib/cli/api/endpoints/stop.py
@@ -15,7 +15,7 @@
@handle_endpoint_api_errors("Endpoints")
async def stop(
endpoint_id: str,
- wait: Annotated[bool, Parameter(help="Wait for the endpoint to stop")] = False,
+ wait: Annotated[bool, Parameter(help="Wait for the endpoint to stop", negative=False)] = False,
*,
config: CLIConfigParameter,
) -> None:
diff --git a/src/together/lib/cli/api/evals/list.py b/src/together/lib/cli/api/evals/list.py
index 87393175..c5f46952 100644
--- a/src/together/lib/cli/api/evals/list.py
+++ b/src/together/lib/cli/api/evals/list.py
@@ -44,7 +44,7 @@ async def list(
console.print_json(openapi_dumps(data).decode("utf-8"))
return
- table = ListTable("Evals")
+ table = ListTable("Evals", empty_message="No evals found")
table.add_primary_column("Workflow ID", ratio=2)
table.add_column("Type")
table.add_column("Status")
diff --git a/src/together/lib/cli/api/files/list.py b/src/together/lib/cli/api/files/list.py
index 74931165..d752ef7d 100644
--- a/src/together/lib/cli/api/files/list.py
+++ b/src/together/lib/cli/api/files/list.py
@@ -29,7 +29,10 @@ async def list(
console.print_json(openapi_dumps(files_to_display).decode("utf-8"))
return
- table = ListTable(title="Files")
+ table = ListTable(
+ title="Files",
+ empty_message="You don't have any files yet. To upload your first file run:\n [dim]-[/dim] [primary]tg files upload[/primary]",
+ )
table.add_primary_column("ID")
table.add_column("File name")
table.add_column("Size")
diff --git a/src/together/lib/cli/api/fine_tuning/__init__.py b/src/together/lib/cli/api/fine_tuning/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/together/lib/cli/api/fine_tuning/delete.py b/src/together/lib/cli/api/fine_tuning/delete.py
index 7affeb6d..735dcc14 100644
--- a/src/together/lib/cli/api/fine_tuning/delete.py
+++ b/src/together/lib/cli/api/fine_tuning/delete.py
@@ -14,7 +14,7 @@
async def delete(
fine_tune_id: str,
force: Annotated[Optional[bool], Parameter(negative="", help="Force deletion without confirmation")] = False,
- quiet: Annotated[Optional[bool], Parameter(negative="", help="Deprecated, use --force instead")] = None,
+ quiet: Annotated[Optional[bool], Parameter(negative="", help="Deprecated, use --force instead", show=False)] = None,
*,
config: CLIConfigParameter,
) -> None:
diff --git a/src/together/lib/cli/api/fine_tuning/get_metrics.py b/src/together/lib/cli/api/fine_tuning/get_metrics.py
new file mode 100644
index 00000000..eb251db6
--- /dev/null
+++ b/src/together/lib/cli/api/fine_tuning/get_metrics.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from typing import Annotated
+
+from cyclopts import Parameter
+
+from together._utils._json import openapi_dumps
+from together.lib.cli.utils.config import CLIConfigParameter
+from together.lib.cli.utils._console import console
+from together.lib.cli.components.loader import show_loading_status
+from together.lib.cli.utils.plot_finetune_metrics import metrics_ascii_charts
+
+
+async def get_metrics(
+ fine_tune_id: Annotated[str, Parameter(help="The ID of the fine-tuning job")],
+ *,
+ config: CLIConfigParameter,
+) -> None:
+ """Retrieve training metrics for a fine-tuning job."""
+ response = await show_loading_status(
+ "Fetching metrics...",
+ config.client.fine_tuning.list_metrics(fine_tune_id),
+ )
+ metrics = response.metrics or []
+
+ if config.json:
+ console.print_json(openapi_dumps(metrics).decode("utf-8"))
+ return
+
+ if not metrics:
+ console.print(f"[muted]No metrics found for job {fine_tune_id}[/muted]")
+ return
+
+ console.print(metrics_ascii_charts(metrics))
diff --git a/src/together/lib/cli/api/fine_tuning/list.py b/src/together/lib/cli/api/fine_tuning/list.py
index 1f5fef2a..107422a4 100644
--- a/src/together/lib/cli/api/fine_tuning/list.py
+++ b/src/together/lib/cli/api/fine_tuning/list.py
@@ -49,7 +49,8 @@ async def list(
print_json(openapi_dumps(fine_tunings_to_display).decode("utf-8"))
return
- table = ListTable()
+    EMPTY_MESSAGE = "You don't have any fine-tuned models yet. To fine-tune your first model run:\n  [dim]-[/dim] [primary]tg ft create[/primary]"
+ table = ListTable(empty_message=EMPTY_MESSAGE)
table.add_primary_column("ID")
table.add_column("Base Model")
table.add_column("Suffix")
diff --git a/src/together/lib/cli/api/fine_tuning/list_checkpoints.py b/src/together/lib/cli/api/fine_tuning/list_checkpoints.py
index 5f21c373..fbca957e 100644
--- a/src/together/lib/cli/api/fine_tuning/list_checkpoints.py
+++ b/src/together/lib/cli/api/fine_tuning/list_checkpoints.py
@@ -21,7 +21,10 @@ async def list_checkpoints(
console.print_json(openapi_dumps(checkpoints.data).decode("utf-8"))
return
- table = ListTable(title="Checkpoints")
+ table = ListTable(
+ title="Checkpoints",
+ empty_message=f"No checkpoints found for job {fine_tune_id}",
+ )
table.add_column("ID")
table.add_column("Timestamp")
table.add_primary_column("Type")
@@ -34,11 +37,8 @@ async def list_checkpoints(
)
table.add_row(name, format_timestamp(checkpoint.created_at), checkpoint.checkpoint_type)
- if len(checkpoints.data) == 0:
- console.print(f"No checkpoints found for job {fine_tune_id}")
- return
-
console.print(table)
- console.print(
- "\n[bold dim]To download a checkpoint, use `together fine-tuning download \\[checkpoint-id]`[/bold dim]"
- )
+ if checkpoints.data:
+ console.print(
+            "\n[bold dim]To download a checkpoint, use `tg ft download \\[checkpoint-id]`[/bold dim]"
+ )
diff --git a/src/together/lib/cli/api/fine_tuning/list_events.py b/src/together/lib/cli/api/fine_tuning/list_events.py
index 799b4a29..c1a38c5f 100644
--- a/src/together/lib/cli/api/fine_tuning/list_events.py
+++ b/src/together/lib/cli/api/fine_tuning/list_events.py
@@ -24,7 +24,7 @@ async def list_events(
console.print_json(openapi_dumps(events).decode("utf-8"))
return
- table = ListTable()
+ table = ListTable(empty_message=f"No events found for job {fine_tune_id}")
table.add_primary_column("Type")
table.add_column("Message")
table.add_column("Created At")
diff --git a/src/together/lib/cli/api/fine_tuning/retrieve.py b/src/together/lib/cli/api/fine_tuning/retrieve.py
index 70b37eeb..9cc456ee 100644
--- a/src/together/lib/cli/api/fine_tuning/retrieve.py
+++ b/src/together/lib/cli/api/fine_tuning/retrieve.py
@@ -15,6 +15,7 @@
from together.types.finetune_response import FinetuneResponse
from together.lib.cli.components.loader import show_loading_status
from together.lib.cli.api.fine_tuning.list import status_colors
+from together.lib.cli.utils.plot_finetune_metrics import metrics_block_sparklines
_NEST_INDENT = 4
@@ -174,6 +175,7 @@ async def retrieve(
fine_tune_id: str,
*,
config: CLIConfigParameter,
+ no_plots: bool = False,
) -> None:
"""Retrieve fine-tuning job details."""
response = await show_loading_status(
@@ -186,6 +188,21 @@ async def retrieve(
if response.status in COMPLETED_STATUSES:
_print_job_details(response, fine_tune_id)
+
+ if not no_plots:
+ metrics: list[dict[str, Any]] = []
+ try:
+ metrics_response = await show_loading_status(
+ "Fetching metrics...", config.client.fine_tuning.list_metrics(fine_tune_id)
+ )
+ metrics = metrics_response.metrics or []
+ except Exception:
+ pass
+
+ if metrics:
+ console.print("\n[muted]Training metrics:[/muted]")
+ console.print(metrics_block_sparklines(metrics))
+
return
progress_text = generate_progress_bar(response, datetime.now().astimezone(), use_rich=True)
diff --git a/src/together/lib/cli/components/list.py b/src/together/lib/cli/components/list.py
index 4984562c..4a746344 100644
--- a/src/together/lib/cli/components/list.py
+++ b/src/together/lib/cli/components/list.py
@@ -3,12 +3,17 @@
from typing import Any, Literal
from rich.box import ROUNDED
+from rich.align import Align
+from rich.panel import Panel
from rich.table import Table
from rich.console import Console, RenderResult, ConsoleOptions
+from rich.padding import Padding
class ListTable:
- def __init__(self, title: str | None = None):
+ def __init__(self, title: str | None = None, *, empty_message: str | None = None) -> None:
+ self._title = title
+ self._empty_message = empty_message
self.has_primary_column = False
self.table = Table(
box=ROUNDED,
@@ -36,7 +41,21 @@ def add_column(
def add_row(self, *values: Any) -> None:
self.table.add_row(*values)
+ def _default_empty_message(self) -> str:
+ return "Nothing to show"
+
def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
if self.has_primary_column is False:
raise ValueError("No primary column added")
- yield self.table
+ if self.table.row_count == 0:
+ text = self._empty_message or self._default_empty_message()
+ yield Panel(
+ Align.left(Padding(text, (0, 1, 0, 1))),
+ title=self._title,
+ box=ROUNDED,
+ title_align="left",
+ border_style="table.border",
+ expand=True,
+ )
+ else:
+ yield self.table
diff --git a/src/together/lib/cli/utils/_help_formatter.py b/src/together/lib/cli/utils/_help_formatter.py
index 2f7afe5e..9aa3ccfe 100644
--- a/src/together/lib/cli/utils/_help_formatter.py
+++ b/src/together/lib/cli/utils/_help_formatter.py
@@ -20,7 +20,11 @@ def _names_renderer(entry: HelpEntry) -> str:
"""Combine parameter names and shorts."""
# Commands
if len(entry.names) == 1:
- return entry.names[0]
+ names = entry.names[0]
+ short_part = ", ".join(entry.shorts).strip() if entry.shorts else ""
+ if short_part:
+ return f"{short_part}, {names}"
+ return names
# Parameters
names = " ".join(entry.names[1:]) if entry.names else ""
diff --git a/src/together/lib/cli/utils/plot_finetune_metrics.py b/src/together/lib/cli/utils/plot_finetune_metrics.py
new file mode 100644
index 00000000..97a88a2b
--- /dev/null
+++ b/src/together/lib/cli/utils/plot_finetune_metrics.py
@@ -0,0 +1,147 @@
+"""Fine-tuning metrics plotting utilities.
+
+Public API
+----------
+``metrics_block_sparklines(metrics)``
+ One ▁▂▃▄▅▆▇█ sparkline line per metric — used in ``retrieve``.
+
+``metrics_ascii_charts(metrics, height=6)``
+ One full ASCII line chart per metric — used in ``get-metrics``.
+"""
+
+from __future__ import annotations
+
+from typing import Any
+
+from rich.text import Text
+
+from together.lib.cli.utils.plots import Figure, sparklines
+from together.lib.cli.utils.plots._engine import should_log
+
+_SKIP_KEYS: frozenset[str] = frozenset({"timestamp", "step", "global_step", "epoch"})
+
+
+def _is_skip(k: str) -> bool:
+ base = k.rsplit("/", 1)[-1]
+ return base in _SKIP_KEYS or base.endswith("_step") or base.endswith("_epoch")
+
+
+def _collect_keys(metrics: list[dict[str, Any]]) -> list[str]:
+ """Return plottable numeric keys in insertion order."""
+ keys: list[str] = []
+ seen: set[str] = set()
+ for row in metrics:
+ for k, v in row.items():
+ if k not in seen and isinstance(v, (int, float)) and not _is_skip(k):
+ keys.append(k)
+ seen.add(k)
+ return keys
+
+
+def _get_step(row: dict[str, Any], fallback: int) -> int:
+ """Extract global step, trying several field names before falling back to index."""
+ gs = row.get("global_step", row.get("train/global_step", row.get("step")))
+ return int(gs) if gs is not None else fallback
+
+
+def _step_label(x: float) -> str:
+ return str(int(x))
+
+
+def _no_data() -> Text:
+ t = Text()
+ t.append("No plottable metrics found.", style="muted")
+ return t
+
+
+def _build_figures(
+ metrics: list[dict[str, Any]],
+ *,
+ x_label: Any = _step_label,
+ height: int = 6,
+ width: int = 60,
+) -> list[Figure]:
+ """Return one Figure per plottable metric key."""
+ keys = _collect_keys(metrics)
+ rows = list(metrics)
+ figures: list[Figure] = []
+ for key in keys:
+ steps: list[float] = []
+ values: list[float] = []
+ for i, row in enumerate(rows):
+ v = row.get(key)
+ if isinstance(v, (int, float)):
+ steps.append(float(_get_step(row, fallback=i)))
+ values.append(float(v))
+ if steps:
+ fig = Figure(x_label=x_label, height=height, width=width)
+ fig.add_trace(key, x=steps, y=values, y_log=should_log(values))
+ figures.append(fig)
+ return figures
+
+
+def metrics_block_sparklines(
+ metrics: list[dict[str, Any]],
+ *,
+ width: int = 60,
+) -> Text:
+ """One block-sparkline line per metric, coloured with the CLI theme.
+
+ Args:
+ metrics: List of flat metric dicts (one per training step).
+ width: Sparkline character width (default 60).
+
+ Returns:
+ A ``rich.text.Text`` ready for ``console.print()``.
+ """
+ keys = _collect_keys(metrics)
+ rows = list(metrics)
+ series: dict[str, tuple[list[float], list[float]]] = {}
+ y_log: dict[str, bool] = {}
+ for key in keys:
+ steps: list[float] = []
+ values: list[float] = []
+ for i, row in enumerate(rows):
+ v = row.get(key)
+ if isinstance(v, (int, float)):
+ steps.append(float(_get_step(row, fallback=i)))
+ values.append(float(v))
+ if steps:
+ series[key] = (steps, values)
+ y_log[key] = should_log(values)
+ if not series:
+ return _no_data()
+ return sparklines(series, width=width, y_log=y_log)
+
+
+def metrics_ascii_charts(
+ metrics: list[dict[str, Any]],
+ *,
+ height: int = 6,
+ width: int = 60,
+) -> Text:
+ """One ASCII line chart per metric, with a global-step x-axis.
+
+ Args:
+ metrics: List of flat metric dicts (one per training step).
+ height: Chart body height in rows (default 6).
+ width: Plot character width (default 60).
+
+ Returns:
+ A ``rich.text.Text`` ready for ``console.print()``.
+ """
+ figures = _build_figures(metrics, height=height, width=width)
+ if not figures:
+ return _no_data()
+ text = Text()
+ for i, fig in enumerate(figures):
+ if i > 0:
+ text.append("\n")
+ text.append_text(fig.render())
+ return text
+
+
+__all__ = [
+ "metrics_block_sparklines",
+ "metrics_ascii_charts",
+]
diff --git a/src/together/lib/cli/utils/plots/__init__.py b/src/together/lib/cli/utils/plots/__init__.py
new file mode 100644
index 00000000..b81d42d1
--- /dev/null
+++ b/src/together/lib/cli/utils/plots/__init__.py
@@ -0,0 +1,9 @@
+"""Generic CLI plot utilities."""
+
+from together.lib.cli.utils.plots._engine import Figure, should_log, sparklines
+
+__all__ = [
+ "Figure",
+ "should_log",
+ "sparklines",
+]
diff --git a/src/together/lib/cli/utils/plots/_engine.py b/src/together/lib/cli/utils/plots/_engine.py
new file mode 100644
index 00000000..53034fa8
--- /dev/null
+++ b/src/together/lib/cli/utils/plots/_engine.py
@@ -0,0 +1,525 @@
+"""Generic sparkline and ASCII chart rendering engine.
+
+All functions are domain-agnostic: they accept pre-built data
+and optional display callbacks, making them reusable across fine-tuning,
+inference, cluster analytics, etc.
+
+Data representations
+--------------------
+Internal pipeline functions (``_interpolate``, ``_plot``, …) use *parallel
+dicts*: ``named_xs`` and ``named_ys`` are both ``dict[str, list[float]]``
+keyed by series name. The public ``sparklines`` helper uses a *zipped*
+representation instead: ``series: dict[str, tuple[xs, ys]]``, which is more
+convenient at call sites. ``Figure`` stores the parallel form internally and
+presents the zipped form in its public API.
+"""
+
+from __future__ import annotations
+
+import math
+import bisect
+from typing import Any, Callable
+
+from rich.text import Text
+
+_SPARK_BLOCKS = " ▁▂▃▄▅▆▇█"
+
+# Styles cycled across series in insertion order.
+_SERIES_STYLES = ["white", "green", "yellow", "cyan", "magenta"]
+
+
+def should_log(vals: list[float]) -> bool:
+ """Return True when values span more than 100×, suggesting log scale."""
+ nz = [v for v in vals if v > 0]
+ return len(nz) > 1 and (max(nz) / min(nz)) > 100
+
+
+def _uniform_grid(vals: dict[str, list[float]], n: int) -> list[float]:
+ """Return n evenly-spaced points spanning [min(vals), max(vals)]."""
+ flat = [v for sublist in vals.values() for v in sublist]
+ min_val, max_val = min(flat), max(flat)
+ if n <= 1:
+ return [min_val]
+ return [min_val + (max_val - min_val) * idx / (n - 1) for idx in range(n)]
+
+
+def _interpolate(
+ named_xs: dict[str, list[float]],
+ named_ys: dict[str, list[float]],
+ x_grid: list[float],
+) -> dict[str, list[float]]:
+ """Linearly interpolate each series onto x_grid; clamp at the edges.
+
+ For each grid point:
+ - If it falls before the first data point, use the first y value.
+ - If it falls after the last data point, use the last y value.
+ - Otherwise, linearly interpolate between the two bracketing data points.
+ """
+ results: dict[str, list[float]] = {}
+ for name in named_xs:
+ pairs = sorted(zip(named_xs[name], named_ys[name])) # sort by x
+ xs = [p[0] for p in pairs]
+ ys = [p[1] for p in pairs]
+
+ interpolated: list[float] = []
+ for x_point in x_grid:
+ pos = bisect.bisect_left(xs, x_point)
+ if pos == 0:
+ # x_point is at or before the first data point
+ interpolated.append(ys[0])
+ elif pos == len(xs):
+ # x_point is past the last data point
+ interpolated.append(ys[-1])
+ elif xs[pos] == x_point:
+ # exact match
+ interpolated.append(ys[pos])
+ else:
+ # linear interpolation between xs[pos-1] and xs[pos]
+ left_x, left_y = xs[pos - 1], ys[pos - 1]
+ right_x, right_y = xs[pos], ys[pos]
+ slope = (right_y - left_y) / (right_x - left_x)
+ interpolated.append(left_y + slope * (x_point - left_x))
+
+ results[name] = interpolated
+ return results
+
+
+def _log_transform(
+ named_values: dict[str, list[float]],
+) -> dict[str, list[float]]:
+ """Return new traces with ys replaced by their log10 values."""
+ result: dict[str, list[float]] = {}
+ for name, values in named_values.items():
+ nz = [value for value in values if value > 0]
+ eps = min(nz) * 0.01 if nz else 1e-10
+ result[name] = [math.log10(max(value, eps)) for value in values]
+ return result
+
+
+def _to_plot_rows(
+ interpolated_ys: dict[str, list[float]],
+ y_grid: list[float],
+) -> list[list[int]]:
+ """Map each interpolated y value to its nearest y_grid row index."""
+ plot_rows: list[list[int]] = []
+ for ys in interpolated_ys.values():
+ row = [min(range(len(y_grid)), key=lambda i, _y=y: abs(y_grid[i] - _y)) for y in ys]
+ plot_rows.append(row)
+ return plot_rows
+
+
+def _y_labels(
+ y_grid: list[float],
+ y_log: bool,
+ y_label: Callable[[float], str],
+ left_padding: int,
+) -> tuple[list[str], int]:
+ """Build y-axis tick label strings and compute the label column width."""
+ labels = [y_label(10**y) if y_log else y_label(y) for y in y_grid[::-1]]
+ label_w = max(left_padding, max(len(s) for s in labels))
+ return labels, label_w
+
+
+def _x_labels(
+ x_grid: list[float],
+ n_xticks: int,
+ x_label: Callable[[float], str],
+) -> list[tuple[int, str]]:
+ """Return (column_index, label_string) pairs for each x-axis tick."""
+ width = len(x_grid)
+ x_min = x_grid[0]
+ # Extend by one grid step beyond the last point so the rightmost tick
+ # label shows the true data maximum. round() suppresses floating-point
+ # noise that would otherwise accumulate in the tick value calculations.
+ x_max = round(x_grid[-1] + ((x_grid[-1] - x_grid[0]) / (width - 1) if width > 1 else 0.0), 10)
+ if n_xticks < 2:
+ return [(0, x_label(x_min))]
+ tick_cols = [round(i * (width - 1) / (n_xticks - 1)) for i in range(n_xticks)]
+ tick_vals = [x_min + (x_max - x_min) * i / (n_xticks - 1) for i in range(n_xticks)]
+ return [(col, x_label(val)) for col, val in zip(tick_cols, tick_vals)]
+
+
+def _draw_y_axis(
+ grid: list[list[str]],
+ style_grid: list[list[str]],
+ labels: list[str],
+ label_w: int,
+) -> None:
+ """Fill y-axis labels and ┤/┼ connectors into the grid."""
+ for label, grid_row, style_row in zip(labels, grid, style_grid):
+ label = label.rjust(label_w)
+ for ci, ch in enumerate(label):
+ grid_row[ci] = ch
+ style_row[ci] = "secondary"
+ grid_row[label_w] = "┼"
+ style_row[label_w] = "accent"
+
+
+def _draw_lines(
+ grid: list[list[str]],
+ style_grid: list[list[str]],
+ plot_rows: list[list[int]],
+ styles: list[str],
+ label_w: int,
+) -> None:
+ """Draw all series into the shared grid (last writer wins on collision).
+
+ Coordinate system: y_grid index 0 is the *bottom* of the data range, but
+ grid row 0 is the *top* of the terminal output. The conversion is:
+ screen_row = len(grid) - y_grid_index - 1
+ So a higher y_grid index means a higher data value and a *lower* screen row.
+ """
+ offset = label_w + 1
+ width = len(grid[0])
+ for style, pv in zip(styles, plot_rows):
+ # We look one column ahead (pv[col+1]), so stop one short of the end.
+ for col_idx in range(width - label_w - 2):
+ screen_row = len(grid) - pv[col_idx] - 1 # current column
+ next_screen_row = len(grid) - pv[col_idx + 1] - 1 # next column
+ col = col_idx + offset
+
+ if screen_row == next_screen_row:
+ grid[screen_row][col] = "─"
+ style_grid[screen_row][col] = style
+ continue
+
+ going_down = pv[col_idx] > pv[col_idx + 1] # value decreases → line goes down
+ grid[screen_row][col] = "╮" if going_down else "╯"
+ style_grid[screen_row][col] = style
+ grid[next_screen_row][col] = "╰" if going_down else "╭"
+ style_grid[next_screen_row][col] = style
+ for mid_row in range(min(screen_row, next_screen_row) + 1, max(screen_row, next_screen_row)):
+ grid[mid_row][col] = "│"
+ style_grid[mid_row][col] = style
+
+
+def _draw_x_axis(
+ grid: list[list[str]],
+ style_grid: list[list[str]],
+ label_w: int,
+ x_labels: list[tuple[int, str]],
+) -> None:
+ """Append the └───┬─── border row and tick label row to the grid."""
+ row_len = len(grid[0])
+ width = row_len - label_w - 1
+
+ # Border row: spaces | └ | ─ … ┬ … ─
+ border_chars = list("─" * width)
+ for col, _ in x_labels:
+ border_chars[col] = "┬"
+ border_row = [" "] * label_w + ["└"] + border_chars
+ border_styles = ["secondary"] * label_w + ["accent"] + ["accent"] * width
+ grid.append(border_row)
+ style_grid.append(border_styles)
+
+ # Label row: tick strings centred under their tick column
+ label_row = [" "] * row_len
+ for col, lbl in x_labels:
+ start = label_w + 1 + col - len(lbl) // 2
+ start = max(0, min(start, row_len - len(lbl)))
+ for i, ch in enumerate(lbl):
+ label_row[start + i] = ch
+ grid.append(label_row)
+ style_grid.append(["secondary"] * row_len)
+
+
+def _render_data_row(
+ row: list[str],
+ style_row: list[str],
+) -> Text:
+ """Colorize one grid row, appending each character with its style."""
+ text = Text()
+ for ch, style in zip(row, style_row):
+ text.append(ch, style=style)
+ text.append("\n")
+ return text
+
+
+def _render_body(
+ grid: list[list[str]],
+ style_grid: list[list[str]],
+) -> Text:
+ """Convert the finished grid into a Rich Text object."""
+ text = Text()
+ for row, style_row in zip(grid, style_grid):
+ text.append_text(_render_data_row(row, style_row))
+ return text
+
+
+def _plot(
+ named_xs: dict[str, list[float]],
+ named_ys: dict[str, list[float]],
+ *,
+ styles: dict[str, str],
+ width: int = 60,
+ height: int = 6,
+ x_label: Callable[[float], str] = str,
+ y_label: Callable[[float], str] = str,
+ y_log: bool = False,
+ n_xticks: int = 3,
+ left_padding: int = 8,
+) -> Text:
+ """Render one or more data series as a shared ASCII chart.
+
+ All series are plotted on a common x/y axis. Each series is drawn using
+ the style specified in ``styles``.
+
+ Args:
+ named_xs: Mapping of name → x values.
+ named_ys: Mapping of name → y values.
+ styles: Mapping of name → Rich style string, one entry per trace.
+            Must contain a key for every key in ``named_xs``/``named_ys``.
+ width: Number of character columns in the plot body.
+ height: Number of character rows in the chart body.
+ x_label: Callable that formats an x value into a tick-label string.
+ y_label: Callable that formats a y value into a tick-label string.
+ Receives original values regardless of scale.
+ y_log: When True, values are plotted on a log10 axis and
+ ``y_label`` receives the original (pre-log) values.
+ n_xticks: Number of tick marks and labels on the x-axis (default 3).
+
+ Returns:
+ A ``rich.text.Text`` ready for ``console.print()``.
+ """
+ if not named_xs:
+ t = Text()
+ t.append("No data.", style="muted")
+ return t
+
+ ordered_styles = [styles[name] for name in named_xs]
+
+ x_grid = _uniform_grid(named_xs, width)
+ interpolated_ys = _interpolate(named_xs, named_ys, x_grid)
+ if y_log:
+ interpolated_ys = _log_transform(interpolated_ys)
+ y_grid = _uniform_grid(interpolated_ys, height)
+
+ plot_rows = _to_plot_rows(interpolated_ys, y_grid)
+ y_labels, y_labels_w = _y_labels(y_grid, y_log, y_label, left_padding)
+ x_labels = _x_labels(x_grid, n_xticks, x_label)
+
+ grid: list[list[str]] = [[" "] * (width + y_labels_w + 1) for _ in range(height)]
+ style_grid: list[list[str]] = [["primary"] * (width + y_labels_w + 1) for _ in range(height)]
+
+ _draw_y_axis(grid, style_grid, y_labels, y_labels_w)
+ _draw_lines(grid, style_grid, plot_rows, ordered_styles, y_labels_w)
+ _draw_x_axis(grid, style_grid, y_labels_w, x_labels)
+
+ text = _render_body(grid, style_grid)
+ return text
+
+
+def sparklines(
+ series: dict[str, tuple[list[float], list[float]]],
+ *,
+ width: int = 60,
+ y_log: dict[str, bool] | None = None,
+) -> Text:
+ """Render one block-sparkline line per series, with shared label padding.
+
+ Args:
+ series: Mapping of name → ``(xs, ys)``.
+ width: Sparkline character width (default 60).
+ y_log: Optional mapping of name → bool for log-scale series.
+
+ Returns:
+ A ``rich.text.Text`` ready for ``console.print()``.
+ """
+ if not series:
+ t = Text()
+ t.append("No plottable data.", style="muted")
+ return t
+
+ named_xs = {key: xs for key, (xs, _) in series.items()}
+ named_ys = {key: ys for key, (_, ys) in series.items()}
+ x_grid = _uniform_grid(named_xs, width)
+ interpolated = _interpolate(named_xs, named_ys, x_grid)
+ log_keys = {k for k, v in (y_log or {}).items() if v}
+ if log_keys:
+ transformed = _log_transform({k: interpolated[k] for k in log_keys})
+ interpolated = {**interpolated, **transformed}
+ # Build a separate y_grid per series so log and linear series are never
+ # mixed on the same scale.
+ plot_rows_map = {
+ key: _to_plot_rows(
+ {key: interpolated[key]},
+ _uniform_grid({key: interpolated[key]}, len(_SPARK_BLOCKS)),
+ )[0]
+ for key in series
+ }
+
+ label_w = max(len(k) for k in series)
+ text = Text()
+ for key, (_, ys) in series.items():
+ row = plot_rows_map[key]
+ spark = "".join(_SPARK_BLOCKS[idx] for idx in row).ljust(width)
+ text.append(f" {key:<{label_w}} ", style="muted")
+ text.append(spark, style="white")
+ text.append(f" {ys[0]:.4g} → {ys[-1]:.4g}", style="secondary")
+ text.append("\n")
+
+ return text
+
+
+class Figure:
+ """Composable chart builder.
+
+ ::
+
+ fig = Figure(x_label=lambda s: f"step {s:.0f}", width=80)
+ fig.add_trace("train_loss", x=steps, y=train_losses)
+ fig.add_trace("val_loss", x=steps, y=val_losses)
+ console.print(fig.render())
+
+ Or equivalently::
+
+ fig = Figure()
+ fig.plot(steps, train_losses, label="train_loss")
+ fig.plot(steps, val_losses, label="val_loss")
+ console.print(fig.render())
+
+ """
+
+ def __init__(
+ self,
+ *,
+ x_label: Callable[[float], str] = str,
+ n_xticks: int = 3,
+ width: int = 60,
+ height: int = 6,
+ left_padding: int = 8,
+ ) -> None:
+ """
+ Args:
+ x_label: Callable that formats an x value into a tick-label string.
+ n_xticks: Number of x-axis tick marks and labels (default 3).
+ width: Plot width in terminal characters (default 60).
+ height: Plot height in terminal rows (default 6).
+ left_padding: Minimum width reserved for y-axis tick labels (default 8).
+ """
+ self._xs: dict[str, list[float]] = {}
+ self._ys: dict[str, list[float]] = {}
+ self._y_logs: dict[str, bool] = {}
+ self._y_labels: dict[str, Callable[[float], str]] = {}
+ self._x_label = x_label
+ self._n_xticks = n_xticks
+ self._width = width
+ self._height = height
+ self._left_padding = left_padding
+
+ @staticmethod
+ def _coerce(x: Any, y: Any) -> tuple[list[float], list[float]]:
+ """Sort *x*/*y* by x and return as separate float lists.
+
+ Accepts any iterable of numbers: plain lists, tuples, numpy arrays,
+ pandas Series, range objects, etc.
+ """
+ xs = [float(v) for v in x]
+ ys = [float(v) for v in y]
+ if len(xs) != len(ys):
+ raise ValueError(f"x and y must have the same length, got {len(xs)} vs {len(ys)}")
+ pairs = sorted(zip(xs, ys))
+ return [p[0] for p in pairs], [p[1] for p in pairs]
+
+ def add_trace(
+ self,
+ name: str,
+ *,
+ x: Any,
+ y: Any,
+ y_log: bool = False,
+ y_label: Callable[[float], str] | None = None,
+ ) -> "Figure":
+ """Add a named data series.
+
+ Args:
+ name: Legend / label for the series.
+ x: x values — any numeric iterable.
+ y: y values — any numeric iterable, same length as *x*.
+ y_log: When True, use log10 y-axis for this trace.
+ y_label: Callable that formats a y value into a tick-label string
+ for this trace.
+
+ Returns:
+ ``self``, so calls can be chained.
+ """
+
+ xs, ys = self._coerce(x, y)
+ self._xs[name] = xs
+ self._ys[name] = ys
+ self._y_logs[name] = y_log
+ self._y_labels[name] = y_label or (lambda v: f"{v:.3g}")
+ return self
+
+ def plot(
+ self,
+ x: Any,
+ y: Any,
+ *,
+ label: str = "",
+ y_log: bool = False,
+ y_label: Callable[[float], str] | None = None,
+ ) -> "Figure":
+ """Add a data series.
+
+ Convenience wrapper around :meth:`add_trace` that accepts positional
+ ``x``/``y`` and auto-generates a name when *label* is omitted.
+
+ Args:
+ x: x values — any numeric iterable.
+ y: y values — any numeric iterable, same length as *x*.
+ label: Series name. Auto-generated (``series1``, ``series2``, …)
+ when omitted.
+ y_log: When True, use log10 y-axis for this trace.
+ y_label: Callable that formats a y value into a tick-label string
+ for this trace.
+
+ Returns:
+ ``self``, so calls can be chained.
+ """
+ key = label or f"series{len(self._xs) + 1}"
+ return self.add_trace(key, x=x, y=y, y_log=y_log, y_label=y_label)
+
+ def render(self) -> Text:
+ """Return the full ASCII chart as a :class:`rich.text.Text`.
+
+ The object can be passed around and printed later via
+ ``console.print(fig.render())``.
+ """
+ if not self._xs:
+ t = Text()
+ t.append("No plottable data.", style="muted")
+ return t
+
+ styles = {key: _SERIES_STYLES[i % len(_SERIES_STYLES)] for i, key in enumerate(self._xs)}
+ first_key = next(iter(self._xs))
+
+ text = Text()
+ for key in self._xs:
+ x_from = self._x_label(self._xs[key][0])
+ x_to = self._x_label(self._xs[key][-1])
+ text.append(
+ f" {key} ({x_from} – {x_to}) {self._ys[key][0]:.4g} → {self._ys[key][-1]:.4g}\n",
+ style=styles[key],
+ )
+
+ # y_log / y_label: all traces must share the same scale for the chart
+ # to be meaningful. Raise early if the caller mixed scales.
+ y_log_vals = set(self._y_logs.values())
+ if len(y_log_vals) > 1:
+ raise ValueError("All traces in a Figure must use the same y-axis scale. Cannot mix log and linear traces.")
+ text.append_text(
+ _plot(
+ self._xs,
+ self._ys,
+ styles=styles,
+ width=self._width,
+ height=self._height,
+ x_label=self._x_label,
+ y_label=self._y_labels[first_key],
+ y_log=self._y_logs[first_key],
+ n_xticks=self._n_xticks,
+ left_padding=self._left_padding,
+ )
+ )
+ return text
diff --git a/src/together/lib/utils/files.py b/src/together/lib/utils/files.py
index 86d0c0ce..9027bca4 100644
--- a/src/together/lib/utils/files.py
+++ b/src/together/lib/utils/files.py
@@ -75,6 +75,7 @@ def check_file(
if not file.is_file():
report_dict["found"] = False
report_dict["is_check_passed"] = False
+ report_dict["message"] = f"File not found or path is not a regular file: {file}"
return report_dict
else:
report_dict["found"] = True
@@ -105,9 +106,11 @@ def check_file(
report_dict["filetype"] = "csv"
data_report_dict = _check_csv(file, purpose)
else:
- report_dict["filetype"] = (
+ unknown_ext_msg = (
f"Unknown extension of file {file}. Only files with extensions .jsonl, .parquet, and .csv are supported."
)
+ report_dict["filetype"] = unknown_ext_msg
+ report_dict["message"] = unknown_ext_msg
report_dict["is_check_passed"] = False
report_dict.update(data_report_dict)
diff --git a/src/together/resources/audio/speech.py b/src/together/resources/audio/speech.py
index e3fdfd6b..59af70eb 100644
--- a/src/together/resources/audio/speech.py
+++ b/src/together/resources/audio/speech.py
@@ -92,20 +92,28 @@ def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
stream: If true, output is streamed for several characters at a time instead of waiting
for the full response. The stream terminates with `data: [DONE]`. If false,
@@ -168,20 +176,28 @@ def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
extra_headers: Send extra headers
@@ -240,20 +256,28 @@ def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
extra_headers: Send extra headers
@@ -377,20 +401,28 @@ async def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
stream: If true, output is streamed for several characters at a time instead of waiting
for the full response. The stream terminates with `data: [DONE]`. If false,
@@ -453,20 +485,28 @@ async def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
extra_headers: Send extra headers
@@ -525,20 +565,28 @@ async def create(
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
+
bit_rate: Bitrate of the MP3 audio output in bits per second. Only applicable when
response_format is mp3. Higher values produce better audio quality at larger
file sizes. Default is 128000. Currently supported on Cartesia models.
language: Language of input text.
- response_encoding: Audio encoding of response
+ response_encoding: Audio encoding of response. Only applicable when response_format is raw or pcm.
+ Cartesia models respect this parameter and support all values. Orpheus, Kokoro,
+ and Minimax models always return pcm_s16le regardless of this setting.
response_format: The format of audio output. Supported formats are mp3, wav, raw if streaming is
false. If streaming is true, the only supported format is raw.
- sample_rate: Sampling rate to use for the output audio. The default sampling rate for
- canopylabs/orpheus-3b-0.1-ft and hexgrad/Kokoro-82M is 24000 and for
- cartesia/sonic is 44100.
+ sample_rate: Sampling rate in Hz for the output audio. Cartesia and Minimax models respect
+ this parameter. Orpheus and Kokoro models always output at 24000 Hz regardless
+ of this setting.
extra_headers: Send extra headers
diff --git a/src/together/resources/beta/clusters/clusters.py b/src/together/resources/beta/clusters/clusters.py
index 48cb5229..15325721 100644
--- a/src/together/resources/beta/clusters/clusters.py
+++ b/src/together/resources/beta/clusters/clusters.py
@@ -101,7 +101,9 @@ def create(
Args:
billing_type: RESERVED billing types allow you to specify the duration of the cluster
reservation via the duration_days field. ON_DEMAND billing types will give you
- ownership of the cluster until you delete it.
+ ownership of the cluster until you delete it. SCHEDULED_CAPACITY billing types
+ allow you to reserve capacity for a scheduled time window. You must specify the
+ reservation_start_time and reservation_end_time with this request.
cluster_name: Name of the GPU cluster.
@@ -422,7 +424,9 @@ async def create(
Args:
billing_type: RESERVED billing types allow you to specify the duration of the cluster
reservation via the duration_days field. ON_DEMAND billing types will give you
- ownership of the cluster until you delete it.
+ ownership of the cluster until you delete it. SCHEDULED_CAPACITY billing types
+ allow you to reserve capacity for a scheduled time window. You must specify the
+ reservation_start_time and reservation_end_time with this request.
cluster_name: Name of the GPU cluster.
diff --git a/src/together/resources/beta/jig/jig.py b/src/together/resources/beta/jig/jig.py
index 24f4a1b5..dd318e71 100644
--- a/src/together/resources/beta/jig/jig.py
+++ b/src/together/resources/beta/jig/jig.py
@@ -128,7 +128,7 @@ def update(
description: str | Omit = omit,
environment_variables: Iterable[jig_update_params.EnvironmentVariable] | Omit = omit,
gpu_count: int | Omit = omit,
- gpu_type: Literal["h100-80gb"] | Omit = omit,
+ gpu_type: Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"] | Omit = omit,
health_check_path: str | Omit = omit,
image: str | Omit = omit,
max_replicas: int | Omit = omit,
@@ -262,7 +262,7 @@ def list(
def deploy(
self,
*,
- gpu_type: Literal["h100-80gb"],
+ gpu_type: Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"],
image: str,
name: str,
args: SequenceNotStr[str] | Omit = omit,
@@ -538,7 +538,7 @@ async def update(
description: str | Omit = omit,
environment_variables: Iterable[jig_update_params.EnvironmentVariable] | Omit = omit,
gpu_count: int | Omit = omit,
- gpu_type: Literal["h100-80gb"] | Omit = omit,
+ gpu_type: Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"] | Omit = omit,
health_check_path: str | Omit = omit,
image: str | Omit = omit,
max_replicas: int | Omit = omit,
@@ -672,7 +672,7 @@ async def list(
async def deploy(
self,
*,
- gpu_type: Literal["h100-80gb"],
+ gpu_type: Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"],
image: str,
name: str,
args: SequenceNotStr[str] | Omit = omit,
diff --git a/src/together/resources/fine_tuning.py b/src/together/resources/fine_tuning.py
index fd6ec2d7..8217c2e7 100644
--- a/src/together/resources/fine_tuning.py
+++ b/src/together/resources/fine_tuning.py
@@ -42,6 +42,7 @@
from ..types.fine_tuning_cancel_response import FineTuningCancelResponse
from ..types.fine_tuning_delete_response import FineTuningDeleteResponse
from ..types.fine_tuning_list_events_response import FineTuningListEventsResponse
+from ..types.fine_tuning_list_metrics_response import FineTuningListMetricsResponse
from ..types.fine_tuning_estimate_price_response import FineTuningEstimatePriceResponse
from ..types.fine_tuning_list_checkpoints_response import FineTuningListCheckpointsResponse
@@ -622,6 +623,44 @@ def list_events(
cast_to=FineTuningListEventsResponse,
)
+ def list_metrics(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FineTuningListMetricsResponse:
+ """Retrieves recorded training metrics for a fine-tuning job in chronological
+ order.
+
+        This SDK method exposes no filter parameters, so the call retrieves
+        all recorded metrics for the job.
+
+ Args:
+ id: Fine-tune job ID. A string that starts with `ft-`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return self._get(
+ path_template("/fine-tunes/{id}/metrics", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningListMetricsResponse,
+ )
+
class AsyncFineTuningResource(AsyncAPIResource):
@cached_property
@@ -1184,6 +1223,44 @@ async def list_events(
cast_to=FineTuningListEventsResponse,
)
+ async def list_metrics(
+ self,
+ id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> FineTuningListMetricsResponse:
+ """Retrieves recorded training metrics for a fine-tuning job in chronological
+ order.
+
+        This SDK method exposes no filter parameters, so the call retrieves
+        all recorded metrics for the job.
+
+ Args:
+ id: Fine-tune job ID. A string that starts with `ft-`.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not id:
+ raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
+ return await self._get(
+ path_template("/fine-tunes/{id}/metrics", id=id),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FineTuningListMetricsResponse,
+ )
+
class FineTuningResourceWithRawResponse:
def __init__(self, fine_tuning: FineTuningResource) -> None:
@@ -1214,6 +1291,9 @@ def __init__(self, fine_tuning: FineTuningResource) -> None:
self.list_events = to_raw_response_wrapper(
fine_tuning.list_events,
)
+ self.list_metrics = to_raw_response_wrapper(
+ fine_tuning.list_metrics,
+ )
class AsyncFineTuningResourceWithRawResponse:
@@ -1245,6 +1325,9 @@ def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
self.list_events = async_to_raw_response_wrapper(
fine_tuning.list_events,
)
+ self.list_metrics = async_to_raw_response_wrapper(
+ fine_tuning.list_metrics,
+ )
class FineTuningResourceWithStreamingResponse:
@@ -1276,6 +1359,9 @@ def __init__(self, fine_tuning: FineTuningResource) -> None:
self.list_events = to_streamed_response_wrapper(
fine_tuning.list_events,
)
+ self.list_metrics = to_streamed_response_wrapper(
+ fine_tuning.list_metrics,
+ )
class AsyncFineTuningResourceWithStreamingResponse:
@@ -1307,3 +1393,6 @@ def __init__(self, fine_tuning: AsyncFineTuningResource) -> None:
self.list_events = async_to_streamed_response_wrapper(
fine_tuning.list_events,
)
+ self.list_metrics = async_to_streamed_response_wrapper(
+ fine_tuning.list_metrics,
+ )
diff --git a/src/together/types/__init__.py b/src/together/types/__init__.py
index 54b6e7d0..dc9600b1 100644
--- a/src/together/types/__init__.py
+++ b/src/together/types/__init__.py
@@ -62,6 +62,7 @@
from .endpoint_list_hardware_response import EndpointListHardwareResponse as EndpointListHardwareResponse
from .fine_tuning_list_events_response import FineTuningListEventsResponse as FineTuningListEventsResponse
from .fine_tuning_estimate_price_params import FineTuningEstimatePriceParams as FineTuningEstimatePriceParams
+from .fine_tuning_list_metrics_response import FineTuningListMetricsResponse as FineTuningListMetricsResponse
from .fine_tuning_estimate_price_response import FineTuningEstimatePriceResponse as FineTuningEstimatePriceResponse
from .fine_tuning_list_checkpoints_response import (
FineTuningListCheckpointsResponse as FineTuningListCheckpointsResponse,
diff --git a/src/together/types/audio/speech_create_params.py b/src/together/types/audio/speech_create_params.py
index 289c7850..c3992d4e 100644
--- a/src/together/types/audio/speech_create_params.py
+++ b/src/together/types/audio/speech_create_params.py
@@ -31,6 +31,12 @@ class SpeechCreateParamsBase(TypedDict, total=False):
You can view the voices supported for each model using the /v1/voices endpoint
sending the model name as the query parameter.
[View all supported voices here](https://docs.together.ai/docs/text-to-speech#supported-voices).
+
+ `hexgrad/Kokoro-82M` additionally supports voice mixing, where two or more
+ voices are combined into a single blended voice by joining their names with `+`
+ (e.g. `af_bella+af_heart`). Optional per-voice weights can be provided in
+ parentheses (e.g. `af_bella(2)+af_heart(1)`). Other models require a single
+ voice name.
"""
bit_rate: Literal[32000, 64000, 96000, 128000, 192000]
@@ -45,7 +51,12 @@ class SpeechCreateParamsBase(TypedDict, total=False):
"""Language of input text."""
response_encoding: Literal["pcm_f32le", "pcm_s16le", "pcm_mulaw", "pcm_alaw"]
- """Audio encoding of response"""
+ """Audio encoding of response.
+
+ Only applicable when response_format is raw or pcm. Cartesia models respect this
+ parameter and support all values. Orpheus, Kokoro, and Minimax models always
+ return pcm_s16le regardless of this setting.
+ """
response_format: Literal["mp3", "wav", "raw"]
"""The format of audio output.
@@ -55,10 +66,10 @@ class SpeechCreateParamsBase(TypedDict, total=False):
"""
sample_rate: int
- """Sampling rate to use for the output audio.
+ """Sampling rate in Hz for the output audio.
- The default sampling rate for canopylabs/orpheus-3b-0.1-ft and
- hexgrad/Kokoro-82M is 24000 and for cartesia/sonic is 44100.
+ Cartesia and Minimax models respect this parameter. Orpheus and Kokoro models
+ always output at 24000 Hz regardless of this setting.
"""
diff --git a/src/together/types/beta/cluster_create_params.py b/src/together/types/beta/cluster_create_params.py
index 4a5a549e..5bdcb343 100644
--- a/src/together/types/beta/cluster_create_params.py
+++ b/src/together/types/beta/cluster_create_params.py
@@ -16,7 +16,9 @@ class ClusterCreateParams(TypedDict, total=False):
"""
RESERVED billing types allow you to specify the duration of the cluster
reservation via the duration_days field. ON_DEMAND billing types will give you
- ownership of the cluster until you delete it.
+ ownership of the cluster until you delete it. SCHEDULED_CAPACITY billing types
+ allow you to reserve capacity for a scheduled time window. You must specify the
+ reservation_start_time and reservation_end_time with this request.
"""
cluster_name: Required[str]
diff --git a/src/together/types/beta/deployment.py b/src/together/types/beta/deployment.py
index 64bf81ab..378067c1 100644
--- a/src/together/types/beta/deployment.py
+++ b/src/together/types/beta/deployment.py
@@ -199,7 +199,7 @@ class Deployment(BaseModel):
gpu_count: Optional[int] = None
"""GPUCount is the number of GPUs allocated to each replica in this deployment"""
- gpu_type: Optional[Literal["h100-80gb", " a100-80gb"]] = None
+ gpu_type: Optional[Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"]] = None
"""GPUType specifies the type of GPU requested (if any) for this deployment"""
health_check_path: Optional[str] = None
diff --git a/src/together/types/beta/jig_deploy_params.py b/src/together/types/beta/jig_deploy_params.py
index fbd30b6a..b6c5797f 100644
--- a/src/together/types/beta/jig_deploy_params.py
+++ b/src/together/types/beta/jig_deploy_params.py
@@ -19,7 +19,7 @@
class JigDeployParams(TypedDict, total=False):
- gpu_type: Required[Literal["h100-80gb"]]
+ gpu_type: Required[Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"]]
"""GPUType specifies the GPU hardware to use (e.g., "h100-80gb")."""
image: Required[str]
diff --git a/src/together/types/beta/jig_update_params.py b/src/together/types/beta/jig_update_params.py
index deb33e9e..e73aad28 100644
--- a/src/together/types/beta/jig_update_params.py
+++ b/src/together/types/beta/jig_update_params.py
@@ -52,7 +52,7 @@ class JigUpdateParams(TypedDict, total=False):
gpu_count: int
"""GPUCount is the number of GPUs to allocate per container instance"""
- gpu_type: Literal["h100-80gb"]
+ gpu_type: Literal["h100-80gb", "h100-40gb-mig", "b200-192gb"]
"""GPUType specifies the GPU hardware to use (e.g., "h100-80gb")"""
health_check_path: str
diff --git a/src/together/types/fine_tuning_list_metrics_response.py b/src/together/types/fine_tuning_list_metrics_response.py
new file mode 100644
index 00000000..2ff55a59
--- /dev/null
+++ b/src/together/types/fine_tuning_list_metrics_response.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["FineTuningListMetricsResponse"]
+
+
+class FineTuningListMetricsResponse(BaseModel):
+ metrics: Optional[List[Dict[str, float]]] = None
diff --git a/tests/api_resources/test_fine_tuning.py b/tests/api_resources/test_fine_tuning.py
index ddba0d79..91dc74a6 100644
--- a/tests/api_resources/test_fine_tuning.py
+++ b/tests/api_resources/test_fine_tuning.py
@@ -17,6 +17,7 @@
FineTuningCancelResponse,
FineTuningDeleteResponse,
FineTuningListEventsResponse,
+ FineTuningListMetricsResponse,
FineTuningEstimatePriceResponse,
FineTuningListCheckpointsResponse,
)
@@ -360,6 +361,44 @@ def test_path_params_list_events(self, client: Together) -> None:
"",
)
+ @parametrize
+ def test_method_list_metrics(self, client: Together) -> None:
+ fine_tuning = client.fine_tuning.list_metrics(
+ "id",
+ )
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ @parametrize
+ def test_raw_response_list_metrics(self, client: Together) -> None:
+ response = client.fine_tuning.with_raw_response.list_metrics(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ fine_tuning = response.parse()
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list_metrics(self, client: Together) -> None:
+ with client.fine_tuning.with_streaming_response.list_metrics(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ fine_tuning = response.parse()
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list_metrics(self, client: Together) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ client.fine_tuning.with_raw_response.list_metrics(
+ "",
+ )
+
class TestAsyncFineTuning:
parametrize = pytest.mark.parametrize(
@@ -692,3 +731,41 @@ async def test_path_params_list_events(self, async_client: AsyncTogether) -> Non
await async_client.fine_tuning.with_raw_response.list_events(
"",
)
+
+ @parametrize
+ async def test_method_list_metrics(self, async_client: AsyncTogether) -> None:
+ fine_tuning = await async_client.fine_tuning.list_metrics(
+ "id",
+ )
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list_metrics(self, async_client: AsyncTogether) -> None:
+ response = await async_client.fine_tuning.with_raw_response.list_metrics(
+ "id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ fine_tuning = await response.parse()
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list_metrics(self, async_client: AsyncTogether) -> None:
+ async with async_client.fine_tuning.with_streaming_response.list_metrics(
+ "id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ fine_tuning = await response.parse()
+ assert_matches_type(FineTuningListMetricsResponse, fine_tuning, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list_metrics(self, async_client: AsyncTogether) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `id` but received ''"):
+ await async_client.fine_tuning.with_raw_response.list_metrics(
+ "",
+ )
diff --git a/tests/cli/test_files.py b/tests/cli/test_files.py
index 3a50e9c3..bfa7356a 100644
--- a/tests/cli/test_files.py
+++ b/tests/cli/test_files.py
@@ -68,6 +68,21 @@ def test_check(self, tmp_path: Path, cli_runner: CliRunner) -> None:
result = cli_runner.invoke(["files", "check", str(sample)])
assert result.exit_code == 0
+ def test_check_missing_file(self, tmp_path: Path, cli_runner: CliRunner) -> None:
+ missing = tmp_path / "nope.jsonl"
+ result = cli_runner.invoke(["files", "check", str(missing)])
+ assert result.exit_code == 1
+ assert "Checks passed" not in result.output
+ assert "not found" in result.output.lower() or "not a regular file" in result.output.lower()
+
+ def test_check_non_jsonl_extension(self, tmp_path: Path, cli_runner: CliRunner) -> None:
+ bad = tmp_path / "bad.txt"
+ bad.write_text("notjson", encoding="utf-8")
+ result = cli_runner.invoke(["files", "check", str(bad)])
+ assert result.exit_code == 1
+ assert "Checks passed" not in result.output
+ assert "Unknown extension" in result.output
+
class TestFilesDelete:
@pytest.mark.respx(base_url=base_url)
diff --git a/tests/cli/test_fine_tuning.py b/tests/cli/test_fine_tuning.py
index b7646c6e..635e9e0d 100644
--- a/tests/cli/test_fine_tuning.py
+++ b/tests/cli/test_fine_tuning.py
@@ -93,6 +93,15 @@ def test_list_json(self, respx_mock: MockRouter, cli_runner: CliRunner) -> None:
parsed = json.loads(result.output)
assert [x["id"] for x in parsed] == ["ft-newer", "ft-older"]
+ @pytest.mark.respx(base_url=base_url)
+ def test_ft_alias_list(self, respx_mock: MockRouter, cli_runner: CliRunner) -> None:
+ respx_mock.get("/fine-tunes").mock(
+ return_value=httpx.Response(200, json={"data": [_FT_LIST_ITEM_OLDER, _FT_LIST_ITEM]})
+ )
+ result = cli_runner.invoke(["ft", "list"])
+ assert result.exit_code == 0
+ assert "ft-newer" in result.output
+
class TestFineTuningRetrieve:
@pytest.mark.respx(base_url=base_url)
diff --git a/tests/cli/test_list_table.py b/tests/cli/test_list_table.py
new file mode 100644
index 00000000..68f98c37
--- /dev/null
+++ b/tests/cli/test_list_table.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+from together.lib.cli.utils._console import console
+from together.lib.cli.components.list import ListTable
+
+
+def _render(t: ListTable, width: int = 80) -> str:
+ with console.capture() as cap:
+ console.print(t, width=width)
+ return cap.get()
+
+
+def test_list_table_empty_renders_panel_not_header_only_table() -> None:
+ t = ListTable(title="Files", empty_message="Nothing to show")
+ t.add_primary_column("ID")
+ t.add_column("Name")
+ out = _render(t, width=80)
+ assert "Files" in out
+ assert "Nothing to show" in out
+ # Must not be a data table with column headers and no rows (header/body join).
+ assert "├" not in out
+
+
+def test_list_table_empty_no_title() -> None:
+ t = ListTable()
+ t.add_primary_column("A")
+ out = _render(t, width=40)
+ assert "Nothing to show" in out
+
+
+def test_list_table_with_rows_still_table() -> None:
+ t = ListTable(title="X")
+ t.add_primary_column("ID")
+ t.add_row("1")
+ out = _render(t, width=40)
+ assert "ID" in out
+ assert "1" in out
+ assert "Nothing to show" not in out
diff --git a/tests/test_extract_files.py b/tests/test_extract_files.py
index b67e1d4b..266d3e08 100644
--- a/tests/test_extract_files.py
+++ b/tests/test_extract_files.py
@@ -4,7 +4,7 @@
import pytest
-from together._types import FileTypes
+from together._types import FileTypes, ArrayFormat
from together._utils import extract_files
@@ -37,10 +37,7 @@ def test_multiple_files() -> None:
def test_top_level_file_array() -> None:
query = {"files": [b"file one", b"file two"], "title": "hello"}
- assert extract_files(query, paths=[["files", ""]]) == [
- ("files[]", b"file one"),
- ("files[]", b"file two"),
- ]
+ assert extract_files(query, paths=[["files", ""]]) == [("files[]", b"file one"), ("files[]", b"file two")]
assert query == {"title": "hello"}
@@ -71,3 +68,24 @@ def test_ignores_incorrect_paths(
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
+
+
+@pytest.mark.parametrize(
+ "array_format,expected_top_level,expected_nested",
+ [
+ ("brackets", [("files[]", b"a"), ("files[]", b"b")], [("items[][file]", b"a"), ("items[][file]", b"b")]),
+ ("repeat", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("comma", [("files", b"a"), ("files", b"b")], [("items[file]", b"a"), ("items[file]", b"b")]),
+ ("indices", [("files[0]", b"a"), ("files[1]", b"b")], [("items[0][file]", b"a"), ("items[1][file]", b"b")]),
+ ],
+)
+def test_array_format_controls_file_field_names(
+ array_format: ArrayFormat,
+ expected_top_level: list[tuple[str, FileTypes]],
+ expected_nested: list[tuple[str, FileTypes]],
+) -> None:
+ top_level = {"files": [b"a", b"b"]}
+ assert extract_files(top_level, paths=[["files", ""]], array_format=array_format) == expected_top_level
+
+ nested = {"items": [{"file": b"a"}, {"file": b"b"}]}
+ assert extract_files(nested, paths=[["items", "", "file"]], array_format=array_format) == expected_nested
diff --git a/tests/test_files.py b/tests/test_files.py
index 1d6c6f5b..a7c60ffd 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -131,7 +131,7 @@ def test_extract_files_does_not_mutate_original_nested_array_path(self) -> None:
copied = deepcopy_with_paths(original, [["items", "", "file"]])
extracted = extract_files(copied, paths=[["items", "", "file"]])
- assert extracted == [("items[][file]", file1), ("items[][file]", file2)]
+ assert [entry for _, entry in extracted] == [file1, file2]
assert original == {
"items": [
{"file": file1, "extra": 1},
diff --git a/tests/test_plots_engine.py b/tests/test_plots_engine.py
new file mode 100644
index 00000000..3abaa7c1
--- /dev/null
+++ b/tests/test_plots_engine.py
@@ -0,0 +1,221 @@
+from __future__ import annotations
+
+import pytest
+
+from together.lib.cli.utils.plots._engine import (
+ Figure,
+ sparklines,
+ _interpolate,
+ _uniform_grid,
+)
+
+
+def linear_series(n: int = 10, start: float = 0.0, stop: float = 1.0) -> list[tuple[float, float]]:
+ step = (stop - start) / max(n - 1, 1)
+ return [(float(i), start + i * step) for i in range(n)]
+
+
+def constant_series(n: int = 5, value: float = 1.0) -> list[tuple[float, float]]:
+ return [(float(i), value) for i in range(n)]
+
+
+# Shared deterministic series used by golden-output tests
+_LOSS = [(float(i), 1.0 - i * 0.1) for i in range(10)] # 1.0 → 0.1
+_ACCURACY = [(float(i), 0.5 + i * 0.05) for i in range(10)] # 0.5 → 0.95
+_WIDE = [(float(i), 10.0**i) for i in range(5)] # 1, 10, 100, 1000, 10000
+
+
+def _X_LABEL(x: float) -> str:
+ return str(int(x))
+
+
+def _interp(xs: list[float], ys: list[float], x_grid: list[float]) -> list[float]:
+ """Helper: interpolate a single series onto x_grid."""
+ return _interpolate({"s": xs}, {"s": ys}, x_grid)["s"]
+
+
+class TestInterpolate:
+ def test_output_length_equals_grid(self) -> None:
+ xs = [float(i) for i in range(10)]
+ ys = [float(i) for i in range(10)]
+ x_grid = _uniform_grid({"s": xs}, 5)
+ result = _interp(xs, ys, x_grid)
+ assert len(result) == 5
+
+ def test_linear_data_interpolates_exactly(self) -> None:
+ xs = [0.0, 9.0]
+ ys = [0.0, 9.0]
+ x_grid = _uniform_grid({"s": xs}, 10)
+ result = _interp(xs, ys, x_grid)
+        # grid points are 0.0, 1.0, 2.0, ..., 9.0 (endpoints included) — y=x so values match
+ assert result == pytest.approx(x_grid, abs=1e-9) # type: ignore[misc]
+
+ def test_constant_series_stays_constant(self) -> None:
+ xs = [float(i) for i in range(20)]
+ ys = [7.0] * 20
+ x_grid = _uniform_grid({"s": xs}, 10)
+ result = _interp(xs, ys, x_grid)
+ assert result == pytest.approx([7.0] * 10, abs=1e-9) # type: ignore[misc]
+
+ def test_left_padding(self) -> None:
+ xs = [5.0, 9.0]
+ ys = [99.0, 99.0]
+ x_grid = _uniform_grid({"range": [0.0, 9.0]}, 10)
+ result = _interp(xs, ys, x_grid)
+ assert result == [99.0] * 10
+
+ def test_right_padding(self) -> None:
+ xs = [0.0, 2.0]
+ ys = [42.0, 42.0]
+ x_grid = _uniform_grid({"range": [0.0, 9.0]}, 10)
+ result = _interp(xs, ys, x_grid)
+ assert result == [42.0] * 10
+
+ def test_single_point_fills_all(self) -> None:
+ xs = [5.0]
+ ys = [3.14]
+ x_grid = _uniform_grid({"range": [0.0, 9.0]}, 8)
+ result = _interp(xs, ys, x_grid)
+ assert result == [3.14] * 8
+
+ def test_uniform_grid_length(self) -> None:
+ assert len(_uniform_grid({"s": [0.0, 10.0]}, 5)) == 5
+
+ def test_uniform_grid_endpoints(self) -> None:
+ grid = _uniform_grid({"s": [0.0, 9.0]}, 10)
+ assert grid[0] == pytest.approx(0.0) # type: ignore[misc]
+ assert grid[-1] == pytest.approx(9.0) # type: ignore[misc]
+
+
+class TestSparklines:
+ def test_empty_series_returns_no_data_message(self) -> None:
+ result = sparklines({}, width=20)
+ assert result.plain == "No plottable data."
+
+ def test_single_series_golden(self) -> None:
+ result = sparklines(
+ {"loss": ([p[0] for p in _LOSS], [p[1] for p in _LOSS])},
+ width=20,
+ )
+ assert result.plain == " loss ██▇▇▆▆▅▅▅▄▄▃▃▃▂▂▁▁ 1 → 0.1\n"
+
+ def test_multi_series_golden(self) -> None:
+ result = sparklines(
+ {
+ "loss": ([p[0] for p in _LOSS], [p[1] for p in _LOSS]),
+ "accuracy": ([p[0] for p in _ACCURACY], [p[1] for p in _ACCURACY]),
+ },
+ width=20,
+ )
+ assert result.plain == (
+ " loss ██▇▇▆▆▅▅▅▄▄▃▃▃▂▂▁▁ 1 → 0.1\n accuracy ▁▁▂▂▃▃▃▄▄▅▅▅▆▆▇▇██ 0.5 → 0.95\n"
+ )
+
+ def test_constant_series_golden(self) -> None:
+ _flat = constant_series(10, 5.0)
+ result = sparklines(
+ {"flat": ([p[0] for p in _flat], [p[1] for p in _flat])},
+ width=20,
+ )
+ assert result.plain == " flat 5 → 5\n"
+
+ def test_single_point_golden(self) -> None:
+ result = sparklines({"single": ([0.0], [1.0])}, width=20)
+ assert result.plain == " single 1 → 1\n"
+
+ def test_log_scale_golden(self) -> None:
+ result = sparklines(
+ {"wide": ([p[0] for p in _WIDE], [p[1] for p in _WIDE])},
+ width=20,
+ y_log={"wide": True},
+ )
+ assert result.plain == " wide ▁▁▂▂▂▃▃▄▄▅▅▆▆▆▇▇███ 1 → 1e+04\n"
+
+
+class TestAsciiCharts:
+ def test_empty_series_returns_no_data_message(self) -> None:
+ result = Figure().render()
+ assert result.plain == "No plottable data."
+
+ def test_single_series_golden(self) -> None:
+ result = (
+ Figure(width=20, height=4, n_xticks=3, x_label=_X_LABEL)
+ .add_trace("loss", x=[p[0] for p in _LOSS], y=[p[1] for p in _LOSS])
+ .render()
+ )
+ assert result.plain == (
+ " loss (0 – 9) 1 → 0.1\n"
+ " 1┼───╮ \n"
+ " 0.7┼ ╰─────╮ \n"
+ " 0.4┼ ╰─────╮ \n"
+ " 0.1┼ ╰─── \n"
+ " └┬─────────┬────────┬\n"
+ " 0 4 9\n"
+ )
+
+ def test_multi_series_golden(self) -> None:
+ result = (
+ Figure(width=20, height=4, n_xticks=3, x_label=_X_LABEL)
+ .add_trace("loss", x=[p[0] for p in _LOSS], y=[p[1] for p in _LOSS])
+ .add_trace("accuracy", x=[p[0] for p in _ACCURACY], y=[p[1] for p in _ACCURACY])
+ .render()
+ )
+ assert result.plain == (
+ " loss (0 – 9) 1 → 0.1\n"
+ " accuracy (0 – 9) 0.5 → 0.95\n"
+ " 1┼───╮ ╭──── \n"
+ " 0.7┼ ╭───────────╯ \n"
+ " 0.4┼──╯ ╰─────╮ \n"
+ " 0.1┼ ╰─── \n"
+ " └┬─────────┬────────┬\n"
+ " 0 4 9\n"
+ )
+
+ def test_log_scale_golden(self) -> None:
+ result = (
+ Figure(width=20, height=4, n_xticks=3, x_label=_X_LABEL)
+ .add_trace("metric", x=[p[0] for p in _WIDE], y=[p[1] for p in _WIDE], y_log=True)
+ .render()
+ )
+ assert result.plain == (
+ " metric (0 – 4) 1 → 1e+04\n"
+ " 1e+04┼ ╭──── \n"
+ " 464┼ ╭────╯ \n"
+ " 21.5┼ ╭───────╯ \n"
+ " 1┼─╯ \n"
+ " └┬─────────┬────────┬\n"
+ " 0 2 4\n"
+ )
+
+ def test_constant_series_golden(self) -> None:
+ _flat = constant_series(10, 42.0)
+ result = (
+ Figure(width=20, height=4, x_label=_X_LABEL)
+ .add_trace("flat", x=[p[0] for p in _flat], y=[p[1] for p in _flat])
+ .render()
+ )
+ assert result.plain == (
+ " flat (0 – 9) 42 → 42\n"
+ " 42┼ \n"
+ " 42┼ \n"
+ " 42┼ \n"
+ " 42┼─────────────────── \n"
+ " └┬─────────┬────────┬\n"
+ " 0 4 9\n"
+ )
+
+ def test_custom_x_label_golden(self) -> None:
+ result = (
+ Figure(width=20, height=4, n_xticks=3, x_label=lambda x: f"step{int(x)}")
+ .add_trace("m", x=[p[0] for p in _LOSS], y=[p[1] for p in _LOSS])
+ .render()
+ )
+ assert result.plain == (
+ " m (step0 – step9) 1 → 0.1\n"
+ " 1┼───╮ \n"
+ " 0.7┼ ╰─────╮ \n"
+ " 0.4┼ ╰─────╮ \n"
+ " 0.1┼ ╰─── \n"
+ " └┬─────────┬────────┬\n"
+ " step0 step4 step9\n"
+ )
diff --git a/tests/unit/test_cli_telemetry.py b/tests/unit/test_cli_telemetry.py
index 9b4516db..07e943da 100644
--- a/tests/unit/test_cli_telemetry.py
+++ b/tests/unit/test_cli_telemetry.py
@@ -51,6 +51,48 @@ def test_sanitize_cli_error_message_redacts_api_key_assignment() -> None:
assert "" in out
+def test_sanitize_cli_error_message_redacts_bare_sk_prefix() -> None:
+ out = sanitize_cli_error_message("invalid: sk-12345678abcdefghij")
+ assert "12345678abc" not in out
+ assert "sk-12" not in out
+ assert out.count("") >= 1
+
+
+def test_sanitize_cli_error_message_redacts_hf_and_tog_prefixes() -> None:
+ out = sanitize_cli_error_message("t: hf_abcdefghijklmnopqrstuwxyz123456 and tgp_v1_abcdefghijklmnop")
+ assert "hf_abc" not in out
+ assert "tgp_v1_abc" not in out
+
+
+def test_sanitize_cli_error_message_redacts_jwt() -> None:
+ jwt = "eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxIn0.abcdefghijklmnopqrstuvwxyz0123456789-_"
+ out = sanitize_cli_error_message(f"code 401: {jwt}")
+ assert "eyJ" not in out
+ assert "eyJh" not in out
+
+
+def test_sanitize_cli_error_message_redacts_url_userinfo() -> None:
+ out = sanitize_cli_error_message("fetch https://user:sekrit1@api.example.com/v1")
+ assert "sekrit1" not in out
+ assert "" in out
+ assert "https://:@" in out
+
+
+def test_sanitize_cli_error_message_redacts_url_query_secrets() -> None:
+ out = sanitize_cli_error_message("https://h.example/xy?api_key=hiddenvalueXXXX&n=1")
+ assert "hidden" not in out
+ assert "hiddenvalueXXXX" not in out
+
+
+def test_sanitize_cli_error_message_redacts_secrets_before_truncation() -> None:
+    # A long tail must not push the leading secret past truncation; redaction runs first.
+ secret = "sk-1234567890123456789012345678"
+ tail = "x" * 600
+ out = sanitize_cli_error_message(f"{secret}{tail}")
+ assert "sk-1" not in out
+ assert "123456" not in out
+
+
def test_telemetry_env_opt_out_only_explicit_values(monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.delenv("TOGETHER_TELEMETRY_DISABLED", raising=False)
assert is_tracking_enabled() is True
@@ -168,6 +210,26 @@ def test_parse_command_and_flags_strips_beta_prefix() -> None:
assert is_beta is True
+def test_parse_command_and_flags_normalizes_ft_to_fine_tuning() -> None:
+ from together.lib.cli import app
+ from together.lib.cli._track_cli import parse_command_and_flags
+
+ cmd, flags, is_beta = parse_command_and_flags(app, ["ft", "list", "--json"])
+ assert cmd == "fine-tuning list"
+ assert flags == ["json"]
+ assert is_beta is False
+
+
+def test_parse_command_and_flags_normalizes_ls_alias_to_list() -> None:
+ from together.lib.cli import app
+ from together.lib.cli._track_cli import parse_command_and_flags
+
+ cmd, flags, is_beta = parse_command_and_flags(app, ["endpoints", "ls", "--json"])
+ assert cmd == "endpoints list"
+ assert flags == ["json"]
+ assert is_beta is False
+
+
def test_parse_command_and_flags_positionals_are_argument_names_not_command_tokens() -> None:
from together.lib.cli import app
from together.lib.cli._track_cli import parse_command_and_flags
diff --git a/tests/unit/test_files_checks.py b/tests/unit/test_files_checks.py
index f4352cd0..06c9b24d 100644
--- a/tests/unit/test_files_checks.py
+++ b/tests/unit/test_files_checks.py
@@ -572,3 +572,21 @@ def test_check_csv_invalid_column(tmp_path: Path):
report = check_file(file)
assert not report["is_check_passed"]
+
+
+def test_check_file_missing_path(tmp_path: Path) -> None:
+ missing = tmp_path / "does_not_exist.jsonl"
+ report = check_file(missing)
+ assert not report["is_check_passed"]
+ assert report["found"] is False
+ assert "Checks passed" not in report["message"]
+ assert "not found" in report["message"].lower() or "not a regular file" in report["message"].lower()
+
+
+def test_check_file_unknown_extension(tmp_path: Path) -> None:
+ f = tmp_path / "data.txt"
+ f.write_text("notjson\n", encoding="utf-8")
+ report = check_file(f)
+ assert not report["is_check_passed"]
+ assert "Unknown extension" in report["message"]
+ assert report["message"] == report["filetype"]
diff --git a/uv.lock b/uv.lock
index f805cb71..ccb8c358 100644
--- a/uv.lock
+++ b/uv.lock
@@ -186,6 +186,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
+[[package]]
+name = "asciichartpy"
+version = "1.5.25"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "setuptools" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/41/3a/b01436be647f881515ec2f253616bf4a40c1d27d02a69e7f038e27fcdf81/asciichartpy-1.5.25.tar.gz", hash = "sha256:63a305302b2aad51da288b58226009b7b0313eba7d8e2452d5a21a13fcf44d74", size = 8201, upload-time = "2020-08-17T02:07:18.292Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3f/d0/7b958df957e4827837b590944008f0b28078f552b451f7407b4b3d54f574/asciichartpy-1.5.25-py2.py3-none-any.whl", hash = "sha256:33c417a3c8ef7d0a11b98eb9ea6dd9b2c1b17559e539b207a17d26d4302d0258", size = 7228, upload-time = "2020-08-17T02:07:16.386Z" },
+]
+
[[package]]
name = "async-timeout"
version = "5.0.1"
@@ -1454,6 +1466,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/e1/7348090988095e4e39560cfc2f7555b1b2a7357deba19167b600fdf5215d/ruff-0.14.13-py3-none-win_arm64.whl", hash = "sha256:7ab819e14f1ad9fe39f246cfcc435880ef7a9390d81a2b6ac7e01039083dd247", size = 13080224, upload-time = "2026-01-15T20:14:45.853Z" },
]
+[[package]]
+name = "setuptools"
+version = "82.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/4f/db/cfac1baf10650ab4d1c111714410d2fbb77ac5a616db26775db562c8fab2/setuptools-82.0.1.tar.gz", hash = "sha256:7d872682c5d01cfde07da7bccc7b65469d3dca203318515ada1de5eda35efbf9", size = 1152316, upload-time = "2026-03-09T12:47:17.221Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9d/76/f789f7a86709c6b087c5a2f52f911838cad707cc613162401badc665acfe/setuptools-82.0.1-py3-none-any.whl", hash = "sha256:a59e362652f08dcd477c78bb6e7bd9d80a7995bc73ce773050228a348ce2e5bb", size = 1006223, upload-time = "2026-03-09T12:47:15.026Z" },
+]
+
[[package]]
name = "sniffio"
version = "1.3.1"
@@ -1559,10 +1580,11 @@ wheels = [
[[package]]
name = "together"
-version = "2.9.0"
+version = "2.10.0"
source = { editable = "." }
dependencies = [
{ name = "anyio" },
+ { name = "asciichartpy" },
{ name = "cyclopts" },
{ name = "detect-agent" },
{ name = "distro" },
@@ -1628,6 +1650,7 @@ requires-dist = [
{ name = "aiofiles", marker = "extra == 'aiofiles'", specifier = ">=25.0.0" },
{ name = "aiohttp", marker = "extra == 'aiohttp'" },
{ name = "anyio", specifier = ">=3.5.0,<5" },
+ { name = "asciichartpy", specifier = ">=0.7.0" },
{ name = "cyclopts", specifier = ">=4.6.0" },
{ name = "detect-agent", specifier = ">=0.2.0" },
{ name = "distro", specifier = ">=1.7.0,<2" },