diff --git a/Cargo.lock b/Cargo.lock index 9aa8ceb0f2..73b3a06f36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2656,8 +2656,7 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "dynamic-graphql" version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0466a5cdd914efd439f0904a84a17506ec4757f4abf35f9e46f3f314ee13fe75" +source = "git+https://github.com/miratepuffin/dynamic-graphql?branch=add-arg-descriptions#69a07c5fe3c16b4baf76f676c96cde5865cae1de" dependencies = [ "async-graphql", "dynamic-graphql-derive", @@ -2667,8 +2666,7 @@ dependencies = [ [[package]] name = "dynamic-graphql-derive" version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6027c3698e530bf88b37a618a05fd7a5e761dc2777771d5757ff07103f66189" +source = "git+https://github.com/miratepuffin/dynamic-graphql?branch=add-arg-descriptions#69a07c5fe3c16b4baf76f676c96cde5865cae1de" dependencies = [ "Inflector", "darling 0.20.11", diff --git a/Cargo.toml b/Cargo.toml index c96a4e6114..eb7e7e59a9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,7 +72,7 @@ optd-core = { version = "0.18.0", path = "optd/optd/core" } async-graphql = { version = "7.2.1", features = ["dynamic-schema"] } bincode = { version = "2", features = ["serde"] } async-graphql-poem = "7.2.1" -dynamic-graphql = "0.10.1" +dynamic-graphql = { git = "https://github.com/miratepuffin/dynamic-graphql", branch = "add-arg-descriptions" } derive_more = "2.1.1" tikv-jemallocator = "0.6.1" reqwest = { version = "0.12.28", default-features = false, features = [ diff --git a/python/python/raphtory/__init__.py b/python/python/raphtory/__init__.py index 377f3b409f..2b26bda251 100644 --- a/python/python/raphtory/__init__.py +++ b/python/python/raphtory/__init__.py @@ -9,6 +9,7 @@ _sys.modules["raphtory.graphql"] = graphql _sys.modules["raphtory.filter"] = filter _sys.modules["raphtory.gql"] = gql +_sys.modules["raphtory.iterables"] 
= iterables __doc__ = _raphtory.__doc__ if hasattr(_raphtory, "__all__"): diff --git a/python/python/raphtory/__init__.pyi b/python/python/raphtory/__init__.pyi index 161dcf5fee..4a51fc481d 100644 --- a/python/python/raphtory/__init__.pyi +++ b/python/python/raphtory/__init__.pyi @@ -19,13 +19,17 @@ from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -400,8 +404,16 @@ class GraphView(object): GraphView: Returns a graph clone """ - def materialize_at(self, path): - """Materializes the graph view into a graphql compatible folder.""" + def materialize_at(self, path: str | PathLike) -> GraphView: + """ + Materializes the graph view into a folder on disk. + + Arguments: + path (str | PathLike): destination folder for the materialised graph. + + Returns: + GraphView: the materialised graph at `path`. + """ @property def metadata(self) -> Metadata: @@ -660,7 +672,7 @@ class GraphView(object): def vectorise( self, - model, + model: VectorCache, nodes: bool | str = True, edges: bool | str = True, verbose: bool = False, @@ -669,10 +681,9 @@ class GraphView(object): Create a VectorisedGraph from the current graph. Args: - embedding (Callable[[list], list]): Specify the embedding function used to vectorise documents into embeddings. + model (VectorCache): Cache wrapping the embedding model used to embed documents. 
nodes (bool | str): Enable for nodes to be embedded, disable for nodes to not be embedded or specify a custom document property to use if a string is provided. Defaults to True. edges (bool | str): Enable for edges to be embedded, disable for edges to not be embedded or specify a custom document property to use if a string is provided. Defaults to True. - cache (str, optional): Path used to store the cache of embeddings. verbose (bool): Enable to print logs reporting progress. Defaults to False. Returns: @@ -1342,9 +1353,17 @@ class Graph(GraphView): """ class PersistentGraph(GraphView): - """A temporal graph that allows edges and nodes to be deleted.""" + """ + A temporal graph that allows edges and nodes to be deleted. - def __new__(cls, path=None, config=None) -> PersistentGraph: + Arguments: + path (str | PathLike, optional): The path for persisting the graph (only works with disk storage enabled). Defaults to None. + config (Config, optional): Storage/config overrides. Defaults to None. + """ + + def __new__( + cls, path: Optional[str | PathLike] = None, config: Optional[Config] = None + ) -> PersistentGraph: """Create and return a new object. See help(type) for accurate signature.""" def __reduce__(self): ... @@ -3127,7 +3146,7 @@ class PathFromNode(object): """ @property - def earliest_time(self): + def earliest_time(self) -> OptionEventTimeIterable: """ The earliest time of each node. @@ -3249,7 +3268,7 @@ class PathFromNode(object): """ @property - def id(self): + def id(self) -> GIDIterable: """ The node IDs. @@ -3292,7 +3311,7 @@ class PathFromNode(object): """ @property - def latest_time(self): + def latest_time(self) -> OptionEventTimeIterable: """ The latest time of each node. @@ -3334,7 +3353,7 @@ class PathFromNode(object): """ @property - def name(self): + def name(self) -> StringIterable: """ The node names. 
@@ -3352,7 +3371,7 @@ class PathFromNode(object): """ @property - def node_type(self): + def node_type(self) -> OptionArcStringIterable: """ The node types. @@ -3605,7 +3624,7 @@ class PathFromGraph(object): PathFromGraph: The layered view """ - def degree(self): + def degree(self) -> NestedUsizeIterable: """ Returns the node degrees. @@ -3614,7 +3633,7 @@ class PathFromGraph(object): """ @property - def earliest_time(self): + def earliest_time(self) -> NestedOptionEventTimeIterable: """ The node earliest times. @@ -3622,7 +3641,7 @@ class PathFromGraph(object): NestedOptionEventTimeIterable: """ - def edge_history_count(self): + def edge_history_count(self) -> NestedUsizeIterable: """ Returns the number of edge updates for each node. @@ -3736,7 +3755,7 @@ class PathFromGraph(object): """ @property - def history(self): + def history(self) -> NestedHistoryIterable: """ Returns a history object for each node with time entries for when a node is added or change to a node is made. @@ -3745,7 +3764,7 @@ class PathFromGraph(object): """ @property - def id(self): + def id(self) -> NestedGIDIterable: """ The node ids @@ -3753,7 +3772,7 @@ class PathFromGraph(object): NestedGIDIterable: """ - def in_degree(self): + def in_degree(self) -> NestedUsizeIterable: """ Returns the node in-degrees. @@ -3788,7 +3807,7 @@ class PathFromGraph(object): """ @property - def latest_time(self): + def latest_time(self) -> NestedOptionEventTimeIterable: """ The node latest times. @@ -3821,7 +3840,7 @@ class PathFromGraph(object): """ @property - def metadata(self): + def metadata(self) -> MetadataListList: """ Returns the node metadata. @@ -3830,7 +3849,7 @@ class PathFromGraph(object): """ @property - def name(self): + def name(self) -> NestedStringIterable: """ The node names. @@ -3848,7 +3867,7 @@ class PathFromGraph(object): """ @property - def node_type(self): + def node_type(self) -> NestedOptionArcStringIterable: """ The node types. 
@@ -3856,7 +3875,7 @@ class PathFromGraph(object): NestedOptionArcStringIterable: """ - def out_degree(self): + def out_degree(self) -> NestedUsizeIterable: """ Returns the node out-degrees. @@ -3883,7 +3902,7 @@ class PathFromGraph(object): """ @property - def properties(self): + def properties(self) -> PyNestedPropsIterable: """ Returns the node properties. @@ -4051,7 +4070,7 @@ class MutableNode(Node): t: TimeInput, properties: Optional[PropInput] = None, event_id: Optional[int] = None, - layer=None, + layer: Optional[str] = None, ) -> None: """ Add updates to a node in the graph at a specified time. @@ -4064,6 +4083,7 @@ class MutableNode(Node): is of type Prop representing the property value. If None, no properties are updated. event_id (int, optional): The optional integer which will be used as an event id. + layer (str, optional): The layer this update is recorded under. Defaults to None. Returns: None: This function does not return a value, if the operation is successful. @@ -4650,7 +4670,7 @@ class Edges(object): """ @property - def deletions(self): + def deletions(self) -> HistoryIterable: """ Returns a history object for each edge containing their deletion times. @@ -4668,7 +4688,7 @@ class Edges(object): """ @property - def earliest_time(self): + def earliest_time(self) -> OptionEventTimeIterable: """ Returns the earliest time of the edges. @@ -4778,7 +4798,7 @@ class Edges(object): """ @property - def history(self): + def history(self) -> HistoryIterable: """ Returns a history object for each edge containing time entries for when the edge is added or change to the edge is made. @@ -4787,7 +4807,7 @@ class Edges(object): """ @property - def id(self): + def id(self) -> GIDGIDIterable: """ Returns all ids of the edges. @@ -4795,7 +4815,7 @@ class Edges(object): GIDGIDIterable: """ - def is_active(self): + def is_active(self) -> BoolIterable: """ Check if the edges are active (there is at least one update during this time). 
@@ -4803,7 +4823,7 @@ class Edges(object): BoolIterable: """ - def is_deleted(self): + def is_deleted(self) -> BoolIterable: """ Check if the edges are deleted. @@ -4811,7 +4831,7 @@ class Edges(object): BoolIterable: """ - def is_self_loop(self): + def is_self_loop(self) -> BoolIterable: """ Check if the edges are on the same node. @@ -4819,7 +4839,7 @@ class Edges(object): BoolIterable: """ - def is_valid(self): + def is_valid(self) -> BoolIterable: """ Check if the edges are valid (i.e. not deleted). @@ -4836,7 +4856,7 @@ class Edges(object): """ @property - def latest_time(self): + def latest_time(self) -> OptionEventTimeIterable: """ Returns the latest times of the edges. @@ -4857,7 +4877,7 @@ class Edges(object): """ @property - def layer_name(self): + def layer_name(self) -> ArcStringIterable: """ Get the layer name that all edges belong to - assuming they only belong to one layer @@ -4866,7 +4886,7 @@ class Edges(object): """ @property - def layer_names(self): + def layer_names(self) -> ArcStringVecIterable: """ Get the layer names that all edges belong to - assuming they only belong to one layer. @@ -5017,7 +5037,7 @@ class Edges(object): """ @property - def time(self): + def time(self) -> EventTimeIterable: """ Returns the times of exploded edges @@ -5148,7 +5168,7 @@ class NestedEdges(object): """ @property - def deletions(self): + def deletions(self) -> NestedHistoryIterable: """ Returns a history object for each edge containing their deletion times. @@ -5166,7 +5186,7 @@ class NestedEdges(object): """ @property - def earliest_time(self): + def earliest_time(self) -> NestedOptionEventTimeIterable: """ Returns the earliest time of the edges. @@ -5276,7 +5296,7 @@ class NestedEdges(object): """ @property - def history(self): + def history(self) -> NestedHistoryIterable: """ Returns a history object for each edge containing time entries for when the edge is added or change to the edge is made. 
@@ -5285,7 +5305,7 @@ class NestedEdges(object): """ @property - def id(self): + def id(self) -> NestedGIDGIDIterable: """ Returns all ids of the edges. @@ -5293,7 +5313,7 @@ class NestedEdges(object): NestedGIDGIDIterable: """ - def is_active(self): + def is_active(self) -> NestedBoolIterable: """ Check if the edges are active (there is at least one update during this time). @@ -5301,7 +5321,7 @@ class NestedEdges(object): NestedBoolIterable: """ - def is_deleted(self): + def is_deleted(self) -> NestedBoolIterable: """ Check if edges are deleted. @@ -5309,7 +5329,7 @@ class NestedEdges(object): NestedBoolIterable: """ - def is_self_loop(self): + def is_self_loop(self) -> NestedBoolIterable: """ Check if the edges are on the same node. @@ -5317,7 +5337,7 @@ class NestedEdges(object): NestedBoolIterable: """ - def is_valid(self): + def is_valid(self) -> NestedBoolIterable: """ Check if edges are valid (i.e., not deleted). @@ -5334,7 +5354,7 @@ class NestedEdges(object): """ @property - def latest_time(self): + def latest_time(self) -> NestedOptionEventTimeIterable: """ Returns the latest time of the edges. @@ -5355,7 +5375,7 @@ class NestedEdges(object): """ @property - def layer_name(self): + def layer_name(self) -> NestedArcStringIterable: """ Returns the name of the layer the edges belong to - assuming they only belong to one layer. @@ -5364,7 +5384,7 @@ class NestedEdges(object): """ @property - def layer_names(self): + def layer_names(self) -> NestedArcStringVecIterable: """ Returns the names of the layers the edges belong to. @@ -5385,7 +5405,7 @@ class NestedEdges(object): """ @property - def metadata(self): + def metadata(self) -> MetadataListList: """ Get a view of the metadata only. 
@@ -5403,7 +5423,7 @@ class NestedEdges(object): """ @property - def properties(self): + def properties(self) -> PyNestedPropsIterable: """ Returns all properties of the edges @@ -5515,7 +5535,7 @@ class NestedEdges(object): """ @property - def time(self): + def time(self) -> NestedEventTimeIterable: """ Returns the times of exploded edges. @@ -5771,8 +5791,22 @@ class PyPropValueList(object): PropValue: The average of each property values, or None if count is zero. """ - def collect(self): ... - def count(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def count(self) -> int: + """ + Number of properties (or rows of properties). + + Returns: + int: + """ + def drop_none(self) -> list[PropValue]: """ Drop none. @@ -5854,33 +5888,149 @@ class PropType(object): """Return str(self).""" @staticmethod - def bool(): ... + def bool() -> PropType: + """ + Boolean type. + + Returns: + PropType: + """ + @staticmethod - def datetime(): ... + def datetime() -> PropType: + """ + Datetime type (timezone-aware). + + Returns: + PropType: + """ + @staticmethod - def f32(): ... + def decimal(scale: int) -> PropType: + """ + Arbitrary-precision decimal type with a fixed scale (number of digits + after the decimal point). + + Arguments: + scale (int): the number of digits after the decimal point. + + Returns: + PropType: + """ + @staticmethod - def f64(): ... + def f32() -> PropType: + """ + 32-bit float type. + + Returns: + PropType: + """ + @staticmethod - def i32(): ... + def f64() -> PropType: + """ + 64-bit float type. + + Returns: + PropType: + """ + @staticmethod - def i64(): ... + def i32() -> PropType: + """ + Signed 32-bit integer type. + + Returns: + PropType: + """ + @staticmethod - def list(p): ... + def i64() -> PropType: + """ + Signed 64-bit integer type. + + Returns: + PropType: + """ + @staticmethod - def map(hash_map): ... 
+ def list(p: PropType) -> PropType: + """ + List type with a single element type. + + Arguments: + p (PropType): element type. + + Returns: + PropType: + """ + @staticmethod - def naive_datetime(): ... + def map(hash_map: dict[str, PropType]) -> PropType: + """ + Map type with string keys and typed values. + + Arguments: + hash_map (dict[str, PropType]): mapping from key name to value type. + + Returns: + PropType: + """ + + @staticmethod + def naive_datetime() -> PropType: + """ + Naive datetime type (timezone-unaware). + + Returns: + PropType: + """ + @staticmethod - def str(): ... + def str() -> PropType: + """ + String type. + + Returns: + PropType: + """ + @staticmethod - def u16(): ... + def u16() -> PropType: + """ + Unsigned 16-bit integer type. + + Returns: + PropType: + """ + @staticmethod - def u32(): ... + def u32() -> PropType: + """ + Unsigned 32-bit integer type. + + Returns: + PropType: + """ + @staticmethod - def u64(): ... + def u64() -> PropType: + """ + Unsigned 64-bit integer type. + + Returns: + PropType: + """ + @staticmethod - def u8(): ... + def u8() -> PropType: + """ + Unsigned 8-bit integer type. + + Returns: + PropType: + """ class Metadata(object): """A view of metadata of an entity""" @@ -5991,11 +6141,49 @@ class MetadataView(object): def __ne__(self, value): """Return self!=value.""" - def as_dict(self): ... - def get(self, key): ... - def items(self): ... - def keys(self): ... - def values(self): ... + def as_dict(self) -> dict[str, list]: + """ + Materialise the metadata as a plain dict mapping each key to the + list of values seen across the underlying entities. + + Returns: + dict[str, list]: + """ + + def get(self, key: str) -> Optional[PyPropValueList]: + """ + Look up a metadata value by key. + + Arguments: + key (str): metadata key. + + Returns: + Optional[PyPropValueList]: + """ + + def items(self) -> list[tuple[str, PyPropValueList]]: + """ + Pairs of `(key, value list)` for every metadata key. 
+ + Returns: + list[tuple[str, PyPropValueList]]: + """ + + def keys(self) -> list[str]: + """ + Metadata keys present across the underlying entities. + + Returns: + list[str]: + """ + + def values(self) -> list[PyPropValueList]: + """ + Metadata values aligned with `keys()`. + + Returns: + list[PyPropValueList]: + """ class TemporalProperties(object): """A view of the temporal properties of an entity""" @@ -6151,12 +6339,12 @@ class PropertiesView(object): """ @property - def temporal(self): + def temporal(self) -> list[TemporalProperty]: """ Get a view of the temporal properties only. Returns: - List[TemporalProp]: + list[TemporalProperty]: """ def values(self) -> list[list[PropValue]]: @@ -6305,18 +6493,18 @@ class TemporalProperty(object): Optional[PropValue]: """ - def values(self): + def values(self) -> NDArray: """ Get the property values for each update. Returns: - NumpyArray: + NDArray: a numpy array of values, one per update. """ class EventTime(object): """ - Raphtory’s EventTime. - Represents a unique timepoint in the graph’s history as (timestamp, event_id). + Raphtory's EventTime. + Represents a unique timepoint in the graph's history as (timestamp, event_id). - timestamp: Number of milliseconds since the Unix epoch. - event_id: ID used for ordering between equal timestamps. @@ -6326,6 +6514,10 @@ class EventTime(object): EventTime can be converted into a timestamp or a Python datetime, and compared either by timestamp (against ints/floats/datetimes/strings), by tuple of (timestamp, event_id), or against another EventTime. + + Arguments: + timestamp (int | float | datetime | str): A time input convertible to an EventTime. + event_id (int | float | datetime | str | None): Optionally, specify the event id. Defaults to None. 
""" def __eq__(self, value): @@ -6352,7 +6544,11 @@ class EventTime(object): def __ne__(self, value): """Return self!=value.""" - def __new__(cls, timestamp, event_id=None) -> EventTime: + def __new__( + cls, + timestamp: int | float | datetime | str, + event_id: int | float | datetime | str | None = None, + ) -> EventTime: """Create and return a new object. See help(type) for accurate signature.""" def __repr__(self): @@ -6429,41 +6625,41 @@ class OptionalEventTime(object): """Return self!=value.""" @property - def as_tuple(self): + def as_tuple(self) -> Optional[tuple[int, int]]: """ Return this entry as a tuple of (timestamp, event_id), where the timestamp is in milliseconds if an EventTime is contained, or else None. Returns: - tuple[int,int] | None: (timestamp, event_id). + Optional[tuple[int, int]]: (timestamp, event_id). """ @property - def dt(self): + def dt(self) -> Optional[datetime]: """ Returns the UTC datetime representation of this EventTime's timestamp if an EventTime is contained, or else None. Returns: - datetime | None: The UTC datetime. + Optional[datetime]: The UTC datetime. Raises: TimeError: Returns TimeError on timestamp conversion errors (e.g. out-of-range timestamp). """ @property - def event_id(self): + def event_id(self) -> Optional[int]: """ Returns the event id used to order events within the same timestamp if an EventTime is contained, or else None. Returns: - int | None: The event id. + Optional[int]: The event id. """ - def get_event_time(self): + def get_event_time(self) -> Optional[EventTime]: """ Returns the contained EventTime if it exists, or else None. Returns: - EventTime | None: + Optional[EventTime]: """ def is_none(self) -> bool: @@ -6483,12 +6679,12 @@ class OptionalEventTime(object): """ @property - def t(self): + def t(self) -> Optional[int]: """ Returns the timestamp in milliseconds since the Unix epoch if an EventTime is contained, or else None. Returns: - int | None: Milliseconds since the Unix epoch. 
+ Optional[int]: Milliseconds since the Unix epoch. """ class History(object): @@ -6982,30 +7178,194 @@ class Prop(object): """Return repr(self).""" @staticmethod - def bool(value): ... - def dtype(self): ... + def aware_datetime(value: datetime) -> Prop: + """ + Construct a `Prop` holding a timezone-aware datetime (stored as UTC). + + Arguments: + value (datetime): a timezone-aware datetime. Use `Prop.naive_datetime` for naive ones. + + Returns: + Prop: + """ + + @staticmethod + def bool(value: bool) -> Prop: + """ + Construct a `Prop` holding a boolean. + + Arguments: + value (bool): the value to wrap. + + Returns: + Prop: + """ + + @staticmethod + def decimal(value: Decimal | str | int | float) -> Prop: + """ + Construct a `Prop` holding an arbitrary-precision decimal. + + Arguments: + value (Decimal | str | int | float): the value to wrap. Strings must + parse as a decimal. Note that floats only have ~15-17 digits of + precision — pass a string or `decimal.Decimal` for higher precision. + + Returns: + Prop: + """ + + def dtype(self) -> PropType: + """ + Returns the `PropType` of the wrapped value. + + Returns: + PropType: + """ + @staticmethod - def f32(value): ... + def f32(value: float) -> Prop: + """ + Construct a `Prop` holding a 32-bit float. + + Arguments: + value (float): the value to wrap. + + Returns: + Prop: + """ + + @staticmethod + def f64(value: float) -> Prop: + """ + Construct a `Prop` holding a 64-bit float. + + Arguments: + value (float): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def f64(value): ... + def i32(value: int) -> Prop: + """ + Construct a `Prop` holding a signed 32-bit integer. + + Arguments: + value (int): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def i32(value): ... + def i64(value: int) -> Prop: + """ + Construct a `Prop` holding a signed 64-bit integer. + + Arguments: + value (int): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def i64(value): ... 
+ def list(values: list) -> Prop: + """ + Construct a `Prop` holding a list of values. + + Arguments: + values (list): the values to wrap. + + Returns: + Prop: + """ + @staticmethod - def list(values): ... + def map(dict: dict[str, Any]) -> Prop: + """ + Construct a `Prop` holding a string-keyed map of values. + + Arguments: + dict (dict[str, Any]): the map to wrap. + + Returns: + Prop: + """ + @staticmethod - def map(dict): ... + def naive_datetime(value: datetime) -> Prop: + """ + Construct a `Prop` holding a naive (timezone-unaware) datetime. + + Arguments: + value (datetime): the value to wrap (any tz info is dropped). + + Returns: + Prop: + """ + @staticmethod - def str(value): ... + def str(value: str) -> Prop: + """ + Construct a `Prop` holding a string. + + Arguments: + value (str): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def u16(value): ... + def u16(value: int) -> Prop: + """ + Construct a `Prop` holding an unsigned 16-bit integer. + + Arguments: + value (int): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def u32(value): ... + def u32(value: int) -> Prop: + """ + Construct a `Prop` holding an unsigned 32-bit integer. + + Arguments: + value (int): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def u64(value): ... + def u64(value: int) -> Prop: + """ + Construct a `Prop` holding an unsigned 64-bit integer. + + Arguments: + value (int): the value to wrap. + + Returns: + Prop: + """ + @staticmethod - def u8(value): ... + def u8(value: int) -> Prop: + """ + Construct a `Prop` holding an unsigned 8-bit integer. + + Arguments: + value (int): the value to wrap. 
+ + Returns: + Prop: + """ def version() -> str: """ diff --git a/python/python/raphtory/algorithms/__init__.pyi b/python/python/raphtory/algorithms/__init__.pyi index c185492cf8..03a058a0cc 100644 --- a/python/python/raphtory/algorithms/__init__.pyi +++ b/python/python/raphtory/algorithms/__init__.pyi @@ -19,13 +19,17 @@ import raphtory.filter as filter from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -204,7 +208,7 @@ def directed_graph_density(graph: GraphView) -> float: float: Directed graph density of graph. """ -def degree_centrality(graph: GraphView): +def degree_centrality(graph: GraphView) -> OutputNodeState: """ Computes the degree centrality of all nodes in the graph. The values are normalized by dividing each result with the maximum possible degree. Graphs with self-loops can have @@ -214,10 +218,10 @@ def degree_centrality(graph: GraphView): graph (GraphView): The graph view on which the operation is to be performed. Returns: - PyOutputNodeState: NodeState mapping nodes to their associated degree centrality. + OutputNodeState: NodeState mapping nodes to their associated degree centrality. """ -def alternating_mask(graph: GraphView): +def alternating_mask(graph: GraphView) -> OutputNodeState: """ Alternating mask algorithm. It is a mock algorithm suitable only for testing purposes. @@ -225,7 +229,7 @@ def alternating_mask(graph: GraphView): graph (GraphView): The graph view on which the operation is to be performed. 
Returns: - PyOutputNodeState: NodeState mapping nodes to their associated alternating masks. + OutputNodeState: NodeState mapping nodes to their associated alternating masks. """ def max_degree(graph: GraphView) -> int: @@ -300,7 +304,7 @@ def pagerank( max_diff: Optional[float] = None, use_l2_norm: bool = True, damping_factor: float = 0.85, -): +) -> OutputNodeState: """ Pagerank -- pagerank centrality value of the nodes in a graph @@ -318,7 +322,7 @@ def pagerank( damping_factor (float): The damping factor for the PageRank calculation. Defaults to 0.85. Returns: - PyOutputNodeState: NodeState mapping nodes to their pagerank score. + OutputNodeState: NodeState mapping nodes to their pagerank score. """ def single_source_shortest_path( @@ -409,7 +413,7 @@ def local_clustering_coefficient(graph: GraphView, v: NodeInput) -> float: float: the local clustering coefficient of node v in graph. """ -def local_clustering_coefficient_batch(graph: Any, v: Any = None): +def local_clustering_coefficient_batch(graph: Any, v: Any = None) -> OutputNodeState: """ Returns the Local clustering coefficient (batch, intersection) for each specified node in a graph. This measures the degree to which one or multiple nodes in a graph tend to cluster together. @@ -420,7 +424,7 @@ def local_clustering_coefficient_batch(graph: Any, v: Any = None): v: vec of node ids, if empty, will return results for every node in the graph Returns: - PyOutputNodeState: Mapping of vertices to lcc score + OutputNodeState: Mapping of vertices to lcc score """ def weakly_connected_components(graph: GraphView) -> NodeStateUsize: @@ -437,7 +441,7 @@ def weakly_connected_components(graph: GraphView) -> NodeStateUsize: NodeStateUsize: Mapping of nodes to their component ids. 
""" -def strongly_connected_components(graph: GraphView): +def strongly_connected_components(graph: GraphView) -> OutputNodeState: """ Strongly connected components @@ -447,7 +451,7 @@ def strongly_connected_components(graph: GraphView): graph (GraphView): Raphtory graph Returns: - PyOutputNodeState: NodeState mapping nodes to their component ids + OutputNodeState: NodeState mapping nodes to their component ids """ def in_components( @@ -596,7 +600,7 @@ def global_temporal_three_node_motif_multi( """ def local_temporal_three_node_motifs( - graph: GraphView, delta: int, threads=None + graph: GraphView, delta: int, threads: Optional[int] = None ) -> NodeStateMotifs: """ Computes the number of each type of motif that each node participates in. See global_temporal_three_node_motifs for a summary of the motifs involved. @@ -604,6 +608,7 @@ def local_temporal_three_node_motifs( Arguments: graph (GraphView): A directed raphtory graph delta (int): Maximum time difference between the first and last edge of the motif. NB if time for edges was given as a UNIX epoch, this should be given in seconds, otherwise milliseconds should be used (if edge times were given as string) + threads (int, optional): Number of threads to use. Defaults to None. Returns: NodeStateMotifs: A mapping from nodes to lists of motif counts (40 counts in the same order as the global motif counts) with the number of each motif that node participates in. @@ -656,18 +661,18 @@ def balance( """ def label_propagation( - graph: GraphView, iter_count: Any = 20, seed: Optional[bytes] = None -): + graph: GraphView, iter_count: int = 20, seed: Optional[bytes] = None +) -> OutputNodeState: """ Computes components using a label propagation algorithm Arguments: graph (GraphView): A reference to the graph - iter_count: Number of iterations + iter_count (int): Number of iterations. Defaults to 20. 
seed (bytes, optional): Array of 32 bytes of u8 which is set as the rng seed Returns: - PyOutputNodeState: NodeState mapping nodes to community id + OutputNodeState: NodeState mapping nodes to community id """ diff --git a/python/python/raphtory/filter/__init__.pyi b/python/python/raphtory/filter/__init__.pyi index 9eb7ac3078..05eb32788e 100644 --- a/python/python/raphtory/filter/__init__.pyi +++ b/python/python/raphtory/filter/__init__.pyi @@ -15,13 +15,17 @@ from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -89,14 +93,29 @@ class FilterOps(object): def __ne__(self, value): """Return self!=value.""" - def all(self): - """Requires that **all** elements match when the underlying property is list-like.""" + def all(self) -> filter.PropertyFilterOps: + """ + Requires that **all** elements match when the underlying property is list-like. + + Returns: + filter.PropertyFilterOps: + """ - def any(self): - """Requires that **any** element matches when the underlying property is list-like.""" + def any(self) -> filter.PropertyFilterOps: + """ + Requires that **any** element matches when the underlying property is list-like. - def avg(self): - """Averages list elements when the underlying property is numeric and list-like.""" + Returns: + filter.PropertyFilterOps: + """ + + def avg(self) -> filter.PropertyFilterOps: + """ + Averages list elements when the underlying property is numeric and list-like. 
+ + Returns: + filter.PropertyFilterOps: + """ def contains(self, value: Prop) -> filter.FilterExpr: """ @@ -120,8 +139,13 @@ class FilterOps(object): filter.FilterExpr: A filter expression evaluating suffix matching. """ - def first(self): - """Selects the first element when the underlying property is list-like.""" + def first(self) -> filter.PropertyFilterOps: + """ + Selects the first element when the underlying property is list-like. + + Returns: + filter.PropertyFilterOps: + """ def fuzzy_search( self, prop_value: str, levenshtein_distance: int, prefix_match: bool @@ -178,17 +202,37 @@ class FilterOps(object): filter.FilterExpr: A filter expression evaluating `value is not None`. """ - def last(self): - """Selects the last element when the underlying property is list-like.""" + def last(self) -> filter.PropertyFilterOps: + """ + Selects the last element when the underlying property is list-like. - def len(self): - """Returns the list length when the underlying property is list-like.""" + Returns: + filter.PropertyFilterOps: + """ - def max(self): - """Returns the maximum list element when the underlying property is list-like.""" + def len(self) -> filter.PropertyFilterOps: + """ + Returns the list length when the underlying property is list-like. - def min(self): - """Returns the minimum list element when the underlying property is list-like.""" + Returns: + filter.PropertyFilterOps: + """ + + def max(self) -> filter.PropertyFilterOps: + """ + Returns the maximum list element when the underlying property is list-like. + + Returns: + filter.PropertyFilterOps: + """ + + def min(self) -> filter.PropertyFilterOps: + """ + Returns the minimum list element when the underlying property is list-like. + + Returns: + filter.PropertyFilterOps: + """ def not_contains(self, value: Prop) -> filter.FilterExpr: """ @@ -212,8 +256,13 @@ class FilterOps(object): filter.FilterExpr: A filter expression evaluating prefix matching. 
""" - def sum(self): - """Sums list elements when the underlying property is numeric and list-like.""" + def sum(self) -> filter.PropertyFilterOps: + """ + Sums list elements when the underlying property is numeric and list-like. + + Returns: + filter.PropertyFilterOps: + """ class PropertyFilterOps(FilterOps): """ @@ -244,7 +293,7 @@ class Node(object): """ @staticmethod - def after(time: int): + def after(time: int) -> filter.NodeViewPropsFilterBuilder: """ Restricts node evaluation to times strictly after the given time. @@ -252,11 +301,11 @@ class Node(object): time (int): Lower time bound. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def at(time: int): + def at(time: int) -> filter.NodeViewPropsFilterBuilder: """ Restricts node evaluation to a single point in time. @@ -264,11 +313,11 @@ class Node(object): time (int): Event time. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def before(time: int): + def before(time: int) -> filter.NodeViewPropsFilterBuilder: """ Restricts node evaluation to times strictly before the given time. @@ -276,40 +325,51 @@ class Node(object): time (int): Upper time bound. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def by_state_column(state, col): ... + def by_state_column(state: OutputNodeState, col: str) -> filter.FilterExpr: + """ + Build a node filter from a boolean column of an existing node-state result. + + Arguments: + state (OutputNodeState): A pre-computed node state (e.g. from an algorithm). + col (str): Name of the boolean column on `state` whose values determine inclusion. + + Returns: + filter.FilterExpr: + """ + @staticmethod - def id(): + def id() -> filter.NodeIdFilterBuilder: """ Selects the node ID field for filtering. 
Returns: - filter.NodeIdFilterBuilder + filter.NodeIdFilterBuilder: """ @staticmethod - def is_active(): + def is_active() -> filter.FilterExpr: """ Matches nodes that have at least one event in the current view. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def latest(): + def latest() -> filter.NodeViewPropsFilterBuilder: """ Evaluates filters against the latest available state of each node. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def layer(layer: str): + def layer(layer: str) -> filter.NodeViewPropsFilterBuilder: """ Restricts evaluation to nodes belonging to the given layer. @@ -317,11 +377,11 @@ class Node(object): layer (str): Layer name. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def layers(layers: list[str]): + def layers(layers: list[str]) -> filter.NodeViewPropsFilterBuilder: """ Restricts evaluation to nodes belonging to any of the given layers. @@ -329,11 +389,11 @@ class Node(object): layers (list[str]): Layer names. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def metadata(name: str): + def metadata(name: str) -> filter.FilterOps: """ Filters a node metadata field by name. @@ -343,29 +403,29 @@ class Node(object): name (str): Metadata key. Returns: - filter.FilterOps + filter.FilterOps: """ @staticmethod - def name(): + def name() -> filter.NodeNameFilterBuilder: """ Selects the node name field for filtering. Returns: - filter.NodeNameFilterBuilder + filter.NodeNameFilterBuilder: """ @staticmethod - def node_type(): + def node_type() -> filter.NodeTypeFilterBuilder: """ Selects the node type field for filtering. Returns: - filter.NodeTypeFilterBuilder + filter.NodeTypeFilterBuilder: """ @staticmethod - def property(name: str): + def property(name: str) -> filter.PropertyFilterOps: """ Filters a node property by name. 
@@ -375,11 +435,11 @@ class Node(object): name (str): Property key. Returns: - filter.PropertyFilterOps + filter.PropertyFilterOps: """ @staticmethod - def snapshot_at(time: int): + def snapshot_at(time: int) -> filter.NodeViewPropsFilterBuilder: """ Evaluates filters against a snapshot of the graph at a given time. @@ -387,20 +447,20 @@ class Node(object): time (int): Snapshot time. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def snapshot_latest(): + def snapshot_latest() -> filter.NodeViewPropsFilterBuilder: """ Evaluates filters against the most recent snapshot of the graph. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ @staticmethod - def window(start: int, end: int): + def window(start: int, end: int) -> filter.NodeViewPropsFilterBuilder: """ Restricts node evaluation to the given time window. @@ -411,7 +471,7 @@ class Node(object): end (int): End time. Returns: - filter.NodeViewPropsFilterBuilder + filter.NodeViewPropsFilterBuilder: """ class NodeIdFilterBuilder(object): @@ -791,7 +851,7 @@ class Edge(object): """ @staticmethod - def after(time: int): + def after(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts edge evaluation to times strictly after the given time. @@ -799,11 +859,11 @@ class Edge(object): time (int): Lower time bound. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def at(time: int): + def at(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts edge evaluation to a single point in time. @@ -811,11 +871,11 @@ class Edge(object): time (int): Event time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def before(time: int): + def before(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts edge evaluation to times strictly before the given time. 
@@ -823,65 +883,65 @@ class Edge(object): time (int): Upper time bound. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def dst(): + def dst() -> filter.EdgeEndpoint: """ Selects the edge **destination endpoint** for filtering. Returns: - filter.EdgeEndpoint + filter.EdgeEndpoint: """ @staticmethod - def is_active(): + def is_active() -> filter.FilterExpr: """ Matches edges that have at least one event in the current view. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_deleted(): + def is_deleted() -> filter.FilterExpr: """ Matches edges that have been deleted. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_self_loop(): + def is_self_loop() -> filter.FilterExpr: """ Matches edges that are self-loops (source == destination). Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_valid(): + def is_valid() -> filter.FilterExpr: """ Matches edges that are structurally valid in the current view. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def latest(): + def latest() -> filter.EdgeViewPropsFilterBuilder: """ Evaluates edge predicates against the latest available edge state. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def layer(layer: str): + def layer(layer: str) -> filter.EdgeViewPropsFilterBuilder: """ Restricts evaluation to edges belonging to the given layer. @@ -889,11 +949,11 @@ class Edge(object): layer (str): Layer name. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def layers(layers: list[str]): + def layers(layers: list[str]) -> filter.EdgeViewPropsFilterBuilder: """ Restricts evaluation to edges belonging to any of the given layers. @@ -901,11 +961,11 @@ class Edge(object): layers (list[str]): Layer names. 
Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def metadata(name: str): + def metadata(name: str) -> filter.FilterOps: """ Filters an edge metadata field by name. @@ -915,11 +975,11 @@ class Edge(object): name (str): Metadata key. Returns: - filter.FilterOps + filter.FilterOps: """ @staticmethod - def property(name: str): + def property(name: str) -> filter.PropertyFilterOps: """ Filters an edge property by name. @@ -929,11 +989,11 @@ class Edge(object): name (str): Property key. Returns: - filter.PropertyFilterOps + filter.PropertyFilterOps: """ @staticmethod - def snapshot_at(time: int): + def snapshot_at(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Evaluates edge predicates against a snapshot of the graph at a given time. @@ -941,29 +1001,29 @@ class Edge(object): time (int): Snapshot time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def snapshot_latest(): + def snapshot_latest() -> filter.EdgeViewPropsFilterBuilder: """ Evaluates edge predicates against the most recent snapshot of the graph. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def src(): + def src() -> filter.EdgeEndpoint: """ Selects the edge **source endpoint** for filtering. Returns: - filter.EdgeEndpoint + filter.EdgeEndpoint: """ @staticmethod - def window(start: int, end: int): + def window(start: int, end: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts edge evaluation to the given time window. @@ -974,7 +1034,7 @@ class Edge(object): end (int): End time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ class EdgeEndpoint(object): @@ -991,15 +1051,15 @@ class EdgeEndpoint(object): Edge.src().property("country") == "UK" """ - def id(self): + def id(self) -> filter.EdgeEndpointIdFilter: """ Selects the endpoint node ID field for filtering. 
Returns: - filter.EdgeEndpointIdFilter + filter.EdgeEndpointIdFilter: """ - def metadata(self, name: str): + def metadata(self, name: str) -> filter.FilterOps: """ Filters an endpoint node metadata field by name. @@ -1009,26 +1069,26 @@ class EdgeEndpoint(object): name (str): Metadata key. Returns: - filter.FilterOps + filter.FilterOps: """ - def name(self): + def name(self) -> filter.EdgeEndpointNameFilter: """ Selects the endpoint node name field for filtering. Returns: - filter.EdgeEndpointNameFilter + filter.EdgeEndpointNameFilter: """ - def node_type(self): + def node_type(self) -> filter.EdgeEndpointTypeFilter: """ Selects the endpoint node type field for filtering. Returns: - filter.EdgeEndpointTypeFilter + filter.EdgeEndpointTypeFilter: """ - def property(self, name: str): + def property(self, name: str) -> filter.PropertyFilterOps: """ Filters an endpoint node property by name. @@ -1038,7 +1098,7 @@ class EdgeEndpoint(object): name (str): Property key. Returns: - filter.PropertyFilterOps + filter.PropertyFilterOps: """ class EdgeEndpointIdFilter(object): @@ -1415,7 +1475,7 @@ class ExplodedEdge(object): """ @staticmethod - def after(time: int): + def after(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts exploded edge evaluation to times strictly after the given time. @@ -1423,11 +1483,11 @@ class ExplodedEdge(object): time (int): Lower time bound. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def at(time: int): + def at(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts exploded edge evaluation to a single point in time. @@ -1435,11 +1495,11 @@ class ExplodedEdge(object): time (int): Event time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def before(time: int): + def before(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts exploded edge evaluation to times strictly before the given time. 
@@ -1447,56 +1507,56 @@ class ExplodedEdge(object): time (int): Upper time bound. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def is_active(): + def is_active() -> filter.FilterExpr: """ Matches exploded edges that have at least one event in the current view. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_deleted(): + def is_deleted() -> filter.FilterExpr: """ Matches exploded edges that have been deleted. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_self_loop(): + def is_self_loop() -> filter.FilterExpr: """ Matches exploded edges that are self-loops (source == destination). Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def is_valid(): + def is_valid() -> filter.FilterExpr: """ Matches exploded edges that are structurally valid in the current view. Returns: - filter.FilterExpr + filter.FilterExpr: """ @staticmethod - def latest(): + def latest() -> filter.EdgeViewPropsFilterBuilder: """ Evaluates exploded edge predicates against the latest available state. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def layer(layer: str): + def layer(layer: str) -> filter.EdgeViewPropsFilterBuilder: """ Restricts evaluation to exploded edges belonging to the given layer. @@ -1504,11 +1564,11 @@ class ExplodedEdge(object): layer (str): Layer name. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def layers(layers: list[str]): + def layers(layers: list[str]) -> filter.EdgeViewPropsFilterBuilder: """ Restricts evaluation to exploded edges belonging to any of the given layers. @@ -1516,11 +1576,11 @@ class ExplodedEdge(object): layers (list[str]): Layer names. 
Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def metadata(name: str): + def metadata(name: str) -> filter.FilterOps: """ Filters an exploded edge metadata field by name. @@ -1530,11 +1590,11 @@ class ExplodedEdge(object): name (str): Metadata key. Returns: - filter.FilterOps + filter.FilterOps: """ @staticmethod - def property(name: str): + def property(name: str) -> filter.PropertyFilterOps: """ Filters an exploded edge property by name. @@ -1544,11 +1604,11 @@ class ExplodedEdge(object): name (str): Property key. Returns: - filter.PropertyFilterOps + filter.PropertyFilterOps: """ @staticmethod - def snapshot_at(time: int): + def snapshot_at(time: int) -> filter.EdgeViewPropsFilterBuilder: """ Evaluates exploded edge predicates against a snapshot of the graph at a given time. @@ -1556,20 +1616,20 @@ class ExplodedEdge(object): time (int): Snapshot time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def snapshot_latest(): + def snapshot_latest() -> filter.EdgeViewPropsFilterBuilder: """ Evaluates exploded edge predicates against the most recent snapshot of the graph. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ @staticmethod - def window(start: int, end: int): + def window(start: int, end: int) -> filter.EdgeViewPropsFilterBuilder: """ Restricts exploded edge evaluation to the given time window. @@ -1580,7 +1640,7 @@ class ExplodedEdge(object): end (int): End time. Returns: - filter.EdgeViewPropsFilterBuilder + filter.EdgeViewPropsFilterBuilder: """ class Graph(object): @@ -1603,7 +1663,7 @@ class Graph(object): """ @staticmethod - def after(time: int): + def after(time: int) -> filter.ViewFilterBuilder: """ Restricts evaluation to times strictly after the given time. @@ -1611,11 +1671,11 @@ class Graph(object): time (int): Lower time bound. 
Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def at(time: int): + def at(time: int) -> filter.ViewFilterBuilder: """ Restricts evaluation to a single point in time. @@ -1623,11 +1683,11 @@ class Graph(object): time (int): Event time. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def before(time: int): + def before(time: int) -> filter.ViewFilterBuilder: """ Restricts evaluation to times strictly before the given time. @@ -1635,20 +1695,20 @@ class Graph(object): time (int): Upper time bound. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def latest(): + def latest() -> filter.ViewFilterBuilder: """ Evaluates filters against the latest available state of the graph. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def layer(layer: str): + def layer(layer: str) -> filter.ViewFilterBuilder: """ Restricts evaluation to a single layer. @@ -1656,11 +1716,11 @@ class Graph(object): layer (str): Layer name. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def layers(layers: list[str]): + def layers(layers: list[str]) -> filter.ViewFilterBuilder: """ Restricts evaluation to any of the given layers. @@ -1668,11 +1728,11 @@ class Graph(object): layers (list[str]): Layer names. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def snapshot_at(time: int): + def snapshot_at(time: int) -> filter.ViewFilterBuilder: """ Evaluates filters against a snapshot of the graph at a given time. @@ -1680,20 +1740,20 @@ class Graph(object): time (int): Snapshot time. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def snapshot_latest(): + def snapshot_latest() -> filter.ViewFilterBuilder: """ Evaluates filters against the most recent snapshot of the graph. 
Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ @staticmethod - def window(start: int, end: int): + def window(start: int, end: int) -> filter.ViewFilterBuilder: """ Restricts evaluation to events within a time window. @@ -1704,5 +1764,5 @@ class Graph(object): end (int): End time. Returns: - filter.ViewFilterBuilder + filter.ViewFilterBuilder: """ diff --git a/python/python/raphtory/gql/__init__.pyi b/python/python/raphtory/gql/__init__.pyi index f63d8d0b80..52dc70413d 100644 --- a/python/python/raphtory/gql/__init__.pyi +++ b/python/python/raphtory/gql/__init__.pyi @@ -20,9 +20,12 @@ from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * diff --git a/python/python/raphtory/graph_gen/__init__.pyi b/python/python/raphtory/graph_gen/__init__.pyi index 91b8aa18d6..84c2b739ef 100644 --- a/python/python/raphtory/graph_gen/__init__.pyi +++ b/python/python/raphtory/graph_gen/__init__.pyi @@ -20,20 +20,26 @@ from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * __all__ = ["random_attachment", "ba_preferential_attachment"] -def random_attachment(g: Any, nodes_to_add: Any, edges_per_step: Any, 
seed: Any = None): +def random_attachment( + g: Any, nodes_to_add: Any, edges_per_step: Any, seed: Any = None +) -> None: """ Generates a graph using the random attachment model @@ -48,12 +54,12 @@ def random_attachment(g: Any, nodes_to_add: Any, edges_per_step: Any, seed: Any seed: The seed used in rng, an array of length 32 containing ints (ints must have a max size of u8) Returns: - None + None: """ def ba_preferential_attachment( g: Any, nodes_to_add: Any, edges_per_step: Any, seed: Any = None -): +) -> None: """ Generates a graph using the preferential attachment model. @@ -75,6 +81,5 @@ def ba_preferential_attachment( seed: The seed used in rng, an array of length 32 containing ints (ints must have a max size of u8) Returns: - - None + None: """ diff --git a/python/python/raphtory/graph_loader/__init__.pyi b/python/python/raphtory/graph_loader/__init__.pyi index 16af5328ff..e5901cb17f 100644 --- a/python/python/raphtory/graph_loader/__init__.pyi +++ b/python/python/raphtory/graph_loader/__init__.pyi @@ -20,13 +20,17 @@ from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -70,7 +74,7 @@ def lotr_graph_with_props() -> Graph: """ def neo4j_movie_graph( - uri: str, username: str, password: str, database: str = ... + uri: str, username: str, password: str, database: str = "neo4j" ) -> Graph: """ Returns the neo4j movie graph example. 
@@ -79,7 +83,7 @@ def neo4j_movie_graph( uri (str): username (str): password (str): - database (str): + database (str): Neo4j database name. Defaults to "neo4j". Returns: Graph: diff --git a/python/python/raphtory/graphql/__init__.pyi b/python/python/raphtory/graphql/__init__.pyi index d8a8620080..b5c21e2146 100644 --- a/python/python/raphtory/graphql/__init__.pyi +++ b/python/python/raphtory/graphql/__init__.pyi @@ -15,13 +15,17 @@ import raphtory.filter as filter from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -57,6 +61,7 @@ class GraphServer(object): cache_tti_seconds (int, optional): the inactive time in seconds after which a graph is evicted from the cache log_level (str, optional): the log level for the server tracing (bool, optional): whether tracing should be enabled + tracing_level (str, optional): tracing verbosity (e.g. "ERROR", "WARN", "INFO", "DEBUG", "TRACE"). otlp_agent_host (str, optional): OTLP agent host for tracing otlp_agent_port(str, optional): OTLP agent port for tracing otlp_tracing_service_name (str, optional): The OTLP tracing service name @@ -75,6 +80,7 @@ class GraphServer(object): max_recursive_depth (int, optional): Internal safety limit to prevent stack overflows from pathologically structured queries (async-graphql default is 32). max_directives_per_field (int, optional): Maximum number of directives on any single field. disable_introspection (bool, optional): If True, schema introspection is disabled entirely. 
+ permissions_store_path (str | PathLike, optional): Path to the permissions store (used by the optional auth extension). """ def __new__( @@ -84,7 +90,7 @@ class GraphServer(object): cache_tti_seconds: Optional[int] = None, log_level: Optional[str] = None, tracing: Optional[bool] = None, - tracing_level=None, + tracing_level: Optional[str] = None, otlp_agent_host: Optional[str] = None, otlp_agent_port: Optional[str] = None, otlp_tracing_service_name: Optional[str] = None, @@ -103,7 +109,7 @@ class GraphServer(object): max_recursive_depth: Optional[int] = None, max_directives_per_field: Optional[int] = None, disable_introspection: Optional[bool] = None, - permissions_store_path=None, + permissions_store_path: Optional[str | PathLike] = None, ) -> GraphServer: """Create and return a new object. See help(type) for accurate signature.""" @@ -133,15 +139,20 @@ class GraphServer(object): RunningGraphServer: The running server """ - def turn_off_index(self): - """Turn off index for all graphs""" + def turn_off_index(self) -> None: + """ + Turn off index for all graphs. + + Returns: + None: + """ def vectorise_all_graphs( self, embeddings: OpenAIEmbeddings, nodes: bool | str = True, edges: bool | str = True, - ): + ) -> None: """ Vectorise all graphs in the server working directory. @@ -149,6 +160,9 @@ class GraphServer(object): embeddings (OpenAIEmbeddings): the embeddings to use nodes (bool | str): if nodes have to be embedded or not or the custom template to use if a str is provided. Defaults to True. edges (bool | str): if edges have to be embedded or not or the custom template to use if a str is provided. Defaults to True. + + Returns: + None: """ def vectorise_graph( @@ -157,7 +171,7 @@ class GraphServer(object): embeddings: OpenAIEmbeddings, nodes: bool | str = True, edges: bool | str = True, - ): + ) -> None: """ Vectorise the graph name in the server working directory. 
@@ -166,6 +180,9 @@ class GraphServer(object): embeddings (OpenAIEmbeddings): the embeddings to use nodes (bool | str): if nodes have to be embedded or not or the custom template to use if a str is provided. Defaults to True. edges (bool | str): if edges have to be embedded or not or the custom template to use if a str is provided. Defaults to True. + + Returns: + None: """ class RunningGraphServer(object): @@ -173,20 +190,20 @@ class RunningGraphServer(object): def __enter__(self): ... def __exit__(self, _exc_type, _exc_val, _exc_tb): ... - def get_client(self): + def get_client(self) -> RaphtoryClient: """ - Get the client for the server + Get the client for the server. Returns: - RaphtoryClient: the client + RaphtoryClient: the client. """ - def stop(self): + def stop(self) -> None: """ - Stop the server and wait for it to finish + Stop the server and wait for it to finish. Returns: - None: + None: """ class RaphtoryClient(object): @@ -213,14 +230,16 @@ class RaphtoryClient(object): None: """ - def create_index(self, path: Any, index_spec, in_ram: bool = True) -> None: + def create_index( + self, path: str, index_spec: RemoteIndexSpec, in_ram: bool = True + ) -> None: """ Create Index for graph on the server at 'path' Arguments: - path: the path of the graph to be created - RemoteIndexSpec (RemoteIndexSpec): spec specifying the properties that need to be indexed - in_ram (bool): create index in ram + path (str): the path of the graph to be created + index_spec (RemoteIndexSpec): spec specifying the properties that need to be indexed + in_ram (bool): create index in ram. Defaults to True. 
Returns: None: @@ -765,36 +784,48 @@ class AllPropertySpec(object): def __repr__(self): """Return repr(self).""" -def encode_graph(graph): +def encode_graph(graph: Graph | PersistentGraph) -> str: """ Encode a graph using Base64 encoding Arguments: - graph (Graph | PersistentGraph): the graph + graph (Graph | PersistentGraph): the graph Returns: - str: the encoded graph + str: the encoded graph """ -def decode_graph(graph): +def decode_graph(graph: str) -> Union[Graph, PersistentGraph]: """ Decode a Base64-encoded graph Arguments: - graph (str): the encoded graph + graph (str): the encoded graph Returns: - Union[Graph, PersistentGraph]: the decoded graph + Union[Graph, PersistentGraph]: the decoded graph """ -def schema(): +def schema() -> str: """ Returns the raphtory graphql server schema - Returns - str: Graphql schema + Returns: + str: Graphql schema + """ + +def cli() -> None: + """ + Run the Raphtory GraphQL CLI from Python. Uses `sys.argv` for arguments. + + Returns: + None: """ -def cli(): ... -def has_permissions_extension(): - """Returns True if the permissions extension (raphtory-auth) is compiled in.""" +def has_permissions_extension() -> bool: + """ + Returns True if the permissions extension (raphtory-auth) is compiled in. + + Returns: + bool: True if the extension is built in, False otherwise. 
+ """ diff --git a/python/python/raphtory/iterables/__init__.pyi b/python/python/raphtory/iterables/__init__.pyi index 89fce51348..982ed7cc2c 100644 --- a/python/python/raphtory/iterables/__init__.pyi +++ b/python/python/raphtory/iterables/__init__.pyi @@ -16,13 +16,17 @@ from raphtory.algorithms import * from raphtory.vectors import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore @@ -75,6 +79,16 @@ __all__ = [ "NestedResultUtcDateTimeIterable", "MetadataListList", "PyNestedPropsIterable", + "PyPropValueListList", + "PyTemporalPropsList", + "PyTemporalPropsListList", + "PyPropHistValueList", + "PyPropHistValueListList", + "PyTemporalPropList", + "PyTemporalPropListList", + "PyPropHistItemsList", + "PyPropHistItemsListList", + "PropIterable", ] class NestedUtcDateTimeIterable(object): @@ -105,7 +119,13 @@ class NestedUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedGIDIterable(object): def __eq__(self, value): @@ -135,9 +155,29 @@ class NestedGIDIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). 
+ + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ class GIDIterable(object): def __eq__(self, value): @@ -167,9 +207,29 @@ class GIDIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ class StringIterable(object): def __eq__(self, value): @@ -199,7 +259,13 @@ class StringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class OptionArcStringIterable(object): def __eq__(self, value): @@ -229,7 +295,13 @@ class OptionArcStringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class UsizeIterable(object): def __eq__(self, value): @@ -259,11 +331,45 @@ class UsizeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def mean(self) -> float: + """ + Mean of all values in the iterable. + + Returns: + float: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. 
+ + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Sum of all values in the iterable. + + Returns: + Any: + """ class OptionI64Iterable(object): def __eq__(self, value): @@ -293,9 +399,29 @@ class OptionI64Iterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ class NestedOptionArcStringIterable(object): def __eq__(self, value): @@ -325,7 +451,13 @@ class NestedOptionArcStringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedStringIterable(object): def __eq__(self, value): @@ -355,7 +487,13 @@ class NestedStringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedOptionI64Iterable(object): def __eq__(self, value): @@ -385,9 +523,29 @@ class NestedOptionI64Iterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). 
+ + Returns: + Any: + """ class NestedI64VecIterable(object): def __eq__(self, value): @@ -417,7 +575,13 @@ class NestedI64VecIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedUsizeIterable(object): def __eq__(self, value): @@ -447,11 +611,45 @@ class NestedUsizeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def mean(self) -> Any: + """ + Per-row mean of values (one entry per outer row). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Per-row sum of values (one entry per outer row). + + Returns: + Any: + """ class BoolIterable(object): def __eq__(self, value): @@ -481,7 +679,13 @@ class BoolIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class ArcStringIterable(object): def __iter__(self): @@ -493,7 +697,13 @@ class ArcStringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class NestedVecUtcDateTimeIterable(object): def __eq__(self, value): @@ -523,7 +733,13 @@ class NestedVecUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... 
+ def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class OptionVecUtcDateTimeIterable(object): def __eq__(self, value): @@ -553,7 +769,13 @@ class OptionVecUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class GIDGIDIterable(object): def __eq__(self, value): @@ -583,9 +805,29 @@ class GIDGIDIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ class NestedGIDGIDIterable(object): def __eq__(self, value): @@ -615,9 +857,29 @@ class NestedGIDGIDIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ class NestedBoolIterable(object): def __eq__(self, value): @@ -647,7 +909,13 @@ class NestedBoolIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. 
+ + Returns: + list[list]: + """ class U64Iterable(object): def __eq__(self, value): @@ -677,11 +945,45 @@ class U64Iterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def mean(self) -> float: + """ + Mean of all values in the iterable. + + Returns: + float: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Sum of all values in the iterable. + + Returns: + Any: + """ class OptionUtcDateTimeIterable(object): def __eq__(self, value): @@ -711,7 +1013,13 @@ class OptionUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class ArcStringVecIterable(object): def __eq__(self, value): @@ -741,7 +1049,13 @@ class ArcStringVecIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ class NestedArcStringVecIterable(object): def __eq__(self, value): @@ -771,7 +1085,13 @@ class NestedArcStringVecIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedEventTimeIterable(object): def __eq__(self, value): @@ -801,7 +1121,14 @@ class NestedEventTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... 
+ def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + @property def dt(self) -> NestedResultUtcDateTimeIterable: """ @@ -823,8 +1150,22 @@ class NestedEventTimeIterable(object): NestedUsizeIterable: Nested iterable of event ids associated to each EventTime. """ - def max(self): ... - def min(self): ... + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ + @property def t(self) -> NestedI64Iterable: """ @@ -844,7 +1185,13 @@ class NestedArcStringIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ class NestedOptionEventTimeIterable(object): def __eq__(self, value): @@ -874,7 +1221,14 @@ class NestedOptionEventTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + @property def dt(self) -> NestedResultOptionUtcDateTimeIterable: """ @@ -896,8 +1250,22 @@ class NestedOptionEventTimeIterable(object): NestedOptionUsizeIterable: Nested iterable of event ids associated to each EventTime, if available. """ - def max(self): ... - def min(self): ... + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ + @property def t(self) -> NestedOptionI64Iterable: """ @@ -997,7 +1365,14 @@ class EventTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. 
+ + Returns: + list: + """ + @property def dt(self) -> ResultUtcDateTimeIterable: """ @@ -1019,18 +1394,32 @@ class EventTimeIterable(object): UsizeIterable: Iterable of event ids associated to each EventTime. """ - def max(self): ... - def min(self): ... - @property - def t(self) -> I64Iterable: + def max(self) -> Any: """ - Change this Iterable of EventTime into an Iterable of corresponding Unix timestamps in milliseconds. + Maximum value in the iterable, or `None` if empty. Returns: - I64Iterable: Iterable of millisecond timestamps since the Unix epoch for each EventTime. + Any: """ -class OptionEventTimeIterable(object): + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + @property + def t(self) -> I64Iterable: + """ + Change this Iterable of EventTime into an Iterable of corresponding Unix timestamps in milliseconds. + + Returns: + I64Iterable: Iterable of millisecond timestamps since the Unix epoch for each EventTime. + """ + +class OptionEventTimeIterable(object): def __eq__(self, value): """Return self==value.""" @@ -1058,7 +1447,14 @@ class OptionEventTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + @property def dt(self) -> ResultOptionUtcDateTimeIterable: """ @@ -1080,8 +1476,22 @@ class OptionEventTimeIterable(object): OptionUsizeIterable: Iterable of event ids associated to each EventTime, if available. """ - def max(self): ... - def min(self): ... + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + @property def t(self) -> OptionI64Iterable: """ @@ -1280,9 +1690,29 @@ class OptionUsizeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... 
- def max(self): ... - def min(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ class ResultOptionUtcDateTimeIterable(object): def __iter__(self): @@ -1294,7 +1724,13 @@ class ResultOptionUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[Optional[datetime]]: + """ + Materialise the iterable as a list of optional datetimes, raising if any element produced an error. + + Returns: + list[Optional[datetime]]: one entry per element (None where absent). + """ class I64Iterable(object): def __eq__(self, value): @@ -1324,11 +1760,45 @@ class I64Iterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def mean(self) -> float: + """ + Mean of all values in the iterable. + + Returns: + float: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Sum of all values in the iterable. + + Returns: + Any: + """ class ResultUtcDateTimeIterable(object): def __iter__(self): @@ -1340,7 +1810,13 @@ class ResultUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[datetime]: + """ + Materialise the iterable as a list of datetimes, raising if any element produced an error. + + Returns: + list[datetime]: one datetime per element. 
+ """ class NestedHistoryTimestampIterable(object): def __iter__(self): @@ -1528,9 +2004,29 @@ class NestedOptionUsizeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def min(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ class NestedResultOptionUtcDateTimeIterable(object): def __iter__(self): @@ -1542,7 +2038,13 @@ class NestedResultOptionUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list[Optional[datetime]]]: + """ + Materialise the iterable as a nested list of optional datetimes, raising if any element produced an error. + + Returns: + list[list[Optional[datetime]]]: one inner list per outer element (entries are None where absent). + """ class NestedI64Iterable(object): def __eq__(self, value): @@ -1572,11 +2074,45 @@ class NestedI64Iterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def max(self) -> Any: + """ + Per-row maximum value (or None for empty rows). + + Returns: + Any: + """ + + def mean(self) -> Any: + """ + Per-row mean of values (one entry per outer row). + + Returns: + Any: + """ + + def min(self) -> Any: + """ + Per-row minimum value (or None for empty rows). + + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Per-row sum of values (one entry per outer row). 
+ + Returns: + Any: + """ class NestedResultUtcDateTimeIterable(object): def __iter__(self): @@ -1588,7 +2124,13 @@ class NestedResultUtcDateTimeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... + def collect(self) -> list[list[datetime]]: + """ + Materialise the iterable as a nested list of datetimes, raising if any element produced an error. + + Returns: + list[list[datetime]]: one inner list per outer element. + """ class MetadataListList(object): def __contains__(self, key): @@ -1618,11 +2160,48 @@ class MetadataListList(object): def __ne__(self, value): """Return self!=value.""" - def as_dict(self): ... - def get(self, key): ... - def items(self): ... - def keys(self): ... - def values(self): ... + def as_dict(self) -> dict[str, list]: + """ + Materialise as a dict mapping each key to a list of value lists. + + Returns: + dict[str, list]: + """ + + def get(self, key: str) -> Optional[PyPropValueListList]: + """ + Look up the metadata for `key` across all entities. + + Arguments: + key (str): metadata key. + + Returns: + Optional[PyPropValueListList]: + """ + + def items(self) -> list[tuple[str, PyPropValueListList]]: + """ + Pairs of `(key, value list-of-lists)` for every metadata key. + + Returns: + list[tuple[str, PyPropValueListList]]: + """ + + def keys(self) -> list[str]: + """ + Metadata keys present across the underlying entities. + + Returns: + list[str]: + """ + + def values(self) -> list[PyPropValueListList]: + """ + Per-key list of value lists. + + Returns: + list[PyPropValueListList]: + """ class PyNestedPropsIterable(object): def __contains__(self, key): @@ -1660,7 +2239,7 @@ class PyNestedPropsIterable(object): dict[str, List[List[PropValue]]]: """ - def get(self, key: str): + def get(self, key: str) -> Optional[PyPropValueListList]: """ Get property value. @@ -1668,7 +2247,7 @@ class PyNestedPropsIterable(object): key (str): the name of the property. 
Returns: - PyPropValueListList: + Optional[PyPropValueListList]: """ def items(self) -> list[Tuple[str, List[PropValue]]]: @@ -1679,21 +2258,21 @@ class PyNestedPropsIterable(object): list[Tuple[str, List[PropValue]]]: """ - def keys(self): + def keys(self) -> list[str]: """ Get the names for all properties. Returns: - List[Str]: + list[str]: """ @property - def temporal(self): + def temporal(self) -> list[list[TemporalProperty]]: """ Get a view of the temporal properties only. Returns: - List[List[temporalprop]]: + list[list[TemporalProperty]]: """ def values(self) -> list[list[list[PropValue]]]: @@ -1704,3 +2283,793 @@ class PyNestedPropsIterable(object): Returns: list[list[list[PropValue]]]: """ + +class PyPropValueListList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self list[PropValue]: + """ + Compute the average of all property values. Alias for mean(). + + Returns: + list[PropValue]: + """ + + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def count(self) -> UsizeIterable: + """ + Number of properties (or rows of properties). + + Returns: + UsizeIterable: + """ + + def drop_none(self) -> PyPropValueListList: + """ + Drop missing entries from each row. + + Returns: + PyPropValueListList: + """ + + def flatten(self) -> PyPropValueList: + """ + Flatten the nested iterable into a single list of values. + + Returns: + PyPropValueList: + """ + + def max(self) -> list[PropValue]: + """ + Find the maximum property value and its associated time. 
+ + Returns: + list[PropValue]: + """ + + def mean(self) -> PyPropValueList: + """ + Mean property value across each row. + + Returns: + PyPropValueList: + """ + + def median(self) -> PyPropValueList: + """ + Median property value across each row. + + Returns: + PyPropValueList: + """ + + def min(self) -> list[PropValue]: + """ + Min property value. + + Returns: + list[PropValue]: + """ + + def sum(self) -> list[PropValue]: + """ + Sum of property values. + + Returns: + list[PropValue]: + """ + +class PyTemporalPropsList(object): + def __contains__(self, key): + """Return bool(key in self).""" + + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __getitem__(self, key): + """Return self[key].""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __lt__(self, value): + """Return self Optional[PyTemporalPropList]: + """ + Look up a temporal property by key. + + Arguments: + key (str): property key. + + Returns: + Optional[PyTemporalPropList]: + """ + + def histories(self) -> dict[str, PyPropHistItemsList]: + """ + Full update history of each property across the underlying entities. + + Returns: + dict[str, PyPropHistItemsList]: + """ + + def items(self) -> list[tuple[str, PyTemporalPropList]]: + """ + Pairs of `(key, temporal property list)` for every property key. + + Returns: + list[tuple[str, PyTemporalPropList]]: + """ + + def keys(self) -> list[str]: + """ + Property keys present across the underlying entities. + + Returns: + list[str]: + """ + + def latest(self) -> dict[str, PyPropValueList]: + """ + Latest value of each property across the underlying entities. + + Returns: + dict[str, PyPropValueList]: + """ + + def values(self) -> list[PyTemporalPropList]: + """ + Per-key list of temporal property views. 
+ + Returns: + list[PyTemporalPropList]: + """ + +class PyTemporalPropsListList(object): + def __contains__(self, key): + """Return bool(key in self).""" + + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __getitem__(self, key): + """Return self[key].""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __lt__(self, value): + """Return self Optional[PyTemporalPropListList]: + """ + Look up a nested temporal property by key. + + Arguments: + key (str): property key. + + Returns: + Optional[PyTemporalPropListList]: + """ + + def histories(self) -> dict[str, PyPropHistItemsListList]: + """ + Full update history of each property across the nested entities. + + Returns: + dict[str, PyPropHistItemsListList]: + """ + + def items(self) -> list[tuple[str, PyTemporalPropListList]]: + """ + Pairs of `(key, nested temporal property list)` for every property key. + + Returns: + list[tuple[str, PyTemporalPropListList]]: + """ + + def keys(self) -> list[str]: + """ + Property keys present across the underlying entities. + + Returns: + list[str]: + """ + + def latest(self) -> dict[str, PyPropValueListList]: + """ + Latest value of each property across the nested entities. + + Returns: + dict[str, PyPropValueListList]: + """ + + def values(self) -> list[PyTemporalPropListList]: + """ + Per-key list of nested temporal property views. 
+ + Returns: + list[PyTemporalPropListList]: + """ + +class PyPropHistValueList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self list[PropValue]: + """ + Compute the average of all property values. Alias for mean(). + + Returns: + list[PropValue]: + """ + + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def count(self) -> UsizeIterable: + """ + Number of properties (or rows of properties). + + Returns: + UsizeIterable: + """ + + def flatten(self) -> PropIterable: + """ + Flatten the per-row history values into a single iterable of values. + + Returns: + PropIterable: + """ + + def max(self) -> list[PropValue]: + """ + Find the maximum property value and its associated time. + + Returns: + list[PropValue]: + """ + + def mean(self) -> list[PropValue]: + """ + Compute the mean of all property values. + + Returns: + list[PropValue]: The mean of each property values, or None if count is zero. + """ + + def median(self) -> PyPropValueList: + """ + Median property value of each row. + + Returns: + PyPropValueList: + """ + + def min(self) -> list[PropValue]: + """ + Min property value. + + Returns: + list[PropValue]: + """ + + def sum(self) -> list[PropValue]: + """ + Sum of property values. 
+ + Returns: + list[PropValue]: + """ + +class PyPropHistValueListList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def count(self) -> NestedUsizeIterable: + """ + Number of properties (or rows of properties). + + Returns: + NestedUsizeIterable: + """ + + def flatten(self) -> PyPropHistValueList: + """ + Flatten the nested history-values list to a single history-values list. + + Returns: + PyPropHistValueList: + """ + + def max(self) -> list[list[PropValue]]: + """ + Find the maximum property value and its associated time. + + Returns: + list[list[PropValue]]: + """ + + def mean(self) -> PyPropValueListList: + """ + Mean property value across each row. + + Returns: + PyPropValueListList: + """ + + def median(self) -> list[list[PropValue]]: + """ + Median + + Returns: + list[list[PropValue]]: + """ + + def min(self) -> list[list[PropValue]]: + """ + Min property value. + + Returns: + list[list[PropValue]]: + """ + + def sum(self) -> list[list[PropValue]]: + """ + Sum of property values. 
+ + Returns: + list[list[PropValue]]: + """ + +class PyTemporalPropList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self PyPropValueList: + """ + Value of each entity's property at the given time (latest update at or before `t`). + + Arguments: + t (TimeInput): the time at which to evaluate the property. + + Returns: + PyPropValueList: + """ + + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + @property + def history(self) -> HistoryIterable: + """ + Update history (one history per underlying entity). + + Returns: + HistoryIterable: + """ + + def items(self) -> PyPropHistItemsList: + """ + Per-entity list of `(time, value)` pairs across each entity's history. + + Returns: + PyPropHistItemsList: + """ + + def value(self) -> PyPropValueList: + """ + Latest value of each entity's property. + + Returns: + PyPropValueList: + """ + + def values(self) -> PyPropHistValueList: + """ + Per-entity list of property values across each entity's history. + + Returns: + PyPropHistValueList: + """ + +class PyTemporalPropListList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self PyPropValueListList: + """ + Value of each inner entity's property at the given time. + + Arguments: + t (TimeInput): the time at which to evaluate the property. 
+ + Returns: + PyPropValueListList: + """ + + def collect(self) -> list[list]: + """ + Materialise the nested iterable as a list of lists. + + Returns: + list[list]: + """ + + def flatten(self) -> PyTemporalPropList: + """ + Flatten the nested temporal property list to a single list of temporal properties. + + Returns: + PyTemporalPropList: + """ + + @property + def history(self) -> NestedHistoryIterable: + """ + Update history (per outer entity, per inner entity). + + Returns: + NestedHistoryIterable: + """ + + def items(self) -> PyPropHistItemsListList: + """ + Nested list of `(time, value)` pairs across each inner entity's history. + + Returns: + PyPropHistItemsListList: + """ + + def value(self) -> PyPropValueListList: + """ + Latest value of each inner entity's property. + + Returns: + PyPropValueListList: + """ + + def values(self) -> PyPropHistValueListList: + """ + Nested list of property values across each inner entity's history. + + Returns: + PyPropHistValueListList: + """ + +class PyPropHistItemsList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + +class PyPropHistItemsListList(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self list[list]: + """ + Materialise the nested iterable as a list of lists. 
+ + Returns: + list[list]: + """ + +class PropIterable(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self PropValue: + """ + Compute the average of all property values. Alias for mean(). + + Returns: + PropValue: The average of each property values, or None if count is zero. + """ + + def collect(self) -> list: + """ + Materialise the iterable as a Python list. + + Returns: + list: + """ + + def count(self) -> int: + """ + Number of properties (or rows of properties). + + Returns: + int: + """ + + def max(self) -> PropValue: + """ + Find the maximum property value and its associated time. + + Returns: + PropValue: + """ + + def mean(self) -> PropValue: + """ + Compute the mean of all property values. + + Returns: + PropValue: The mean of each property values, or None if count is zero. + """ + + def median(self) -> PropValue: + """ + Median property values. + + Returns: + PropValue: + """ + + def min(self) -> PropValue: + """ + Min property value. + + Returns: + PropValue: + """ + + def sum(self) -> PropValue: + """ + Sum of property values. 
+ + Returns: + PropValue: + """ diff --git a/python/python/raphtory/node_state/__init__.pyi b/python/python/raphtory/node_state/__init__.pyi index 4420f2f28d..6e8c79273c 100644 --- a/python/python/raphtory/node_state/__init__.pyi +++ b/python/python/raphtory/node_state/__init__.pyi @@ -15,13 +15,17 @@ import raphtory.filter as filter from raphtory.algorithms import * from raphtory.vectors import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -75,6 +79,7 @@ __all__ = [ "NodeStateSEIR", "NodeLayout", "NodeStateF64String", + "OutputNodeState", ] class NodeGroups(object): @@ -2506,16 +2511,18 @@ class EarliestDateTimeView(object): NodeStateOptionDateTime: the computed `NodeState` """ - def get(self, node: NodeInput, default=...) -> Optional[datetime]: + def get( + self, node: NodeInput, default: Optional[datetime] = None + ) -> Optional[datetime]: """ Get value for node Arguments: node (NodeInput): the node - default (Optional[datetime]): the default value. Defaults to None. + default (Optional[datetime]): the default value. Defaults to None. Returns: - Optional[datetime]: the value for the node or the default value + Optional[datetime]: the value for the node or the default value """ def groups(self) -> NodeGroups: @@ -5223,11 +5230,45 @@ class UsizeIterable(object): def __repr__(self): """Return repr(self).""" - def collect(self): ... - def max(self): ... - def mean(self): ... - def min(self): ... - def sum(self): ... + def collect(self) -> list: + """ + Materialise the iterable as a Python list. 
+ + Returns: + list: + """ + + def max(self) -> Any: + """ + Maximum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def mean(self) -> float: + """ + Mean of all values in the iterable. + + Returns: + float: + """ + + def min(self) -> Any: + """ + Minimum value in the iterable, or `None` if empty. + + Returns: + Any: + """ + + def sum(self) -> Any: + """ + Sum of all values in the iterable. + + Returns: + Any: + """ class NodeTypeView(object): """A lazy view over node values""" @@ -7721,3 +7762,148 @@ class NodeStateF64String(object): Returns: Iterator[Tuple[float, str]]: Iterator over values """ + +class OutputNodeState(object): + def __eq__(self, value): + """Return self==value.""" + + def __ge__(self, value): + """Return self>=value.""" + + def __getitem__(self, key): + """Return self[key].""" + + def __gt__(self, value): + """Return self>value.""" + + def __iter__(self): + """Implement iter(self).""" + + def __le__(self, value): + """Return self<=value.""" + + def __len__(self): + """Return len(self).""" + + def __lt__(self, value): + """Return self OutputNodeState: + """ + Get OutputNodeState from Parquet + + Arguments: + file_path (str): filepath from which to read OutputNodeState + id_column (str): column from which node IDs will be read. Defaults to "id". + + Returns: + OutputNodeState: + """ + + def get(self, node: NodeInput, default: Optional[dict] = None) -> Optional[dict]: + """ + Get value for node + + Arguments: + node (NodeInput): the node + default (dict, optional): the default value (dict of field name to value). Defaults to None.
+ + Returns: + Optional[dict]: the value for the node or the default value + """ + + def groups(self, cols: list[str]) -> list[tuple[dict, Nodes]]: + """ + Group by value + + Arguments: + cols (list[str]): columns by which to group nodes + + Returns: + list[tuple[dict, Nodes]]: The grouped nodes + """ + + def items(self) -> Iterator[Tuple[Node, Dict]]: + """ + Iterate over items + + Returns: + Iterator[Tuple[Node, Dict]]: Iterator over items + """ + + def merge( + self, + other: OutputNodeState, + index_merge_priority: str = "left", + default_column_merge_priority: str = "left", + column_merge_priority_map: Optional[dict] = None, + ) -> OutputNodeState: + """ + Merge with another OutputNodeState (produces new OutputNodeState) + + Arguments: + other (OutputNodeState): OutputNodeState to merge with + index_merge_priority (str): "left" or "right" to take left or right index, "union" to union index sets. Defaults to "left". + default_column_merge_priority (str): "left" or "right" to prioritize left or right columns by default, "exclude" to exclude columns by default. Defaults to "left". + column_merge_priority_map (dict, optional): map of column names (str) to merge priority ("left", "right", or "exclude"). Defaults to None. + + Returns: + OutputNodeState: + """ + + def nodes(self) -> Nodes: + """ + Iterate over nodes + + Returns: + Nodes: The nodes + """ + + def sort_by(self, sort_params: Dict) -> OutputNodeState: + """ + Sort nodes by their values + + Arguments: + sort_params (Dict): Map of sort keys to sort option ('asc' or 'desc'). None defaults to 'asc' + + Returns: + OutputNodeState: Sorted NodeState + """ + + def to_parquet(self, file_path: str, id_column: str = "id") -> None: + """ + Convert OutputNodeState to Parquet + + Arguments: + file_path (str): filepath to which OutputNodeState is written + id_column (str): column containing IDs of nodes. Defaults to "id".
+ + Returns: + None: + """ + + def top_k(self, sort_params: Dict, k: int) -> OutputNodeState: + """ + Get the top k entries after sorting + + Arguments: + sort_params (Dict): Map of sort keys to sort option ('asc' or 'desc'). None defaults to 'asc' + k (int): Number of top entries to return. + + Returns: + OutputNodeState: Sorted NodeState + """ + + def values(self) -> Iterator[Dict]: + """ + Iterate over values + + Returns: + Iterator[Dict]: Iterator over values (dict of field name to value) + """ diff --git a/python/python/raphtory/vectors/__init__.pyi b/python/python/raphtory/vectors/__init__.pyi index 4473546c13..a849877afc 100644 --- a/python/python/raphtory/vectors/__init__.pyi +++ b/python/python/raphtory/vectors/__init__.pyi @@ -15,13 +15,17 @@ import raphtory.filter as filter from raphtory.algorithms import * from raphtory.node_state import * from raphtory.graphql import * +from raphtory.gql import * from raphtory.typing import * import numpy as np from numpy.typing import NDArray from datetime import datetime +import pandas from pandas import DataFrame +import pyarrow # type: ignore[import-untyped] from pyarrow import DataType # type: ignore[import-untyped] from os import PathLike +from decimal import Decimal import networkx as nx # type: ignore import pyvis # type: ignore from raphtory.iterables import * @@ -33,6 +37,8 @@ __all__ = [ "VectorSelection", "OpenAIEmbeddings", "VectorCache", + "EmbeddingServer", + "RunningEmbeddingServer", "embedding_server", ] @@ -57,8 +63,13 @@ class VectorisedGraph(object): VectorSelection: The vector selection resulting from the search. """ - def empty_selection(self): - """Return an empty selection of entities.""" + def empty_selection(self) -> VectorSelection: + """ + Return an empty selection of entities. + + Returns: + VectorSelection: + """ def entities_by_similarity( self, @@ -96,8 +107,13 @@ class VectorisedGraph(object): VectorSelection: The vector selection resulting from the search.
""" - def optimize_index(self): - """Optmize the vector index""" + def optimize_index(self) -> None: + """ + Optimise the vector index. + + Returns: + None: + """ class Document(object): """A document corresponding to a graph entity. Used to generate embeddings.""" @@ -136,7 +152,13 @@ class Embedding(object): def __repr__(self): """Return repr(self).""" - def to_arrow(self): ... + def to_arrow(self) -> pyarrow.Array: + """ + Returns the embedding as a `pyarrow.Array` of floats. + + Returns: + pyarrow.Array: + """ class VectorSelection(object): def add_edges(self, edges: list) -> None: @@ -295,19 +317,89 @@ class VectorSelection(object): """ class OpenAIEmbeddings(object): + """ + OpenAI-compatible embedding configuration. Pass an instance of this to + `VectorCache(...)` to drive `vectorise(...)`. + + Arguments: + model (str): The OpenAI embedding model to use. Defaults to "text-embedding-3-small". + api_base (str, optional): Base URL for the OpenAI-compatible API. If None, falls back to OpenAI's default endpoint. Defaults to None. + api_key_env (str, optional): Environment variable name to read the API key from. If None, reads from `OPENAI_API_KEY`. Defaults to None. + org_id (str, optional): OpenAI organization id. If None, no org id is sent. Defaults to None. + project_id (str, optional): OpenAI project id. If None, no project id is sent. Defaults to None. + dim (int, optional): Embedding dimension override. If None, the model's native dimension is used. Defaults to None. + """ + def __new__( cls, - model="text-embedding-3-small", - api_base=None, - api_key_env=None, - org_id=None, - project_id=None, - dim=None, + model: str = "text-embedding-3-small", + api_base: Optional[str] = None, + api_key_env: Optional[str] = None, + org_id: Optional[str] = None, + project_id: Optional[str] = None, + dim: Optional[int] = None, ) -> OpenAIEmbeddings: """Create and return a new object. 
See help(type) for accurate signature.""" class VectorCache(object): - def __new__(cls, v_cache, cache=None) -> VectorCache: + """ + Cache wrapping an embedding model. Pass to `Graph.vectorise(model=...)` + or other vectorisation entry points. + + Arguments: + v_cache (OpenAIEmbeddings): Embedding model configuration. + cache (str, optional): Path to persist the embedding cache on disk. Defaults to None. + """ + + def __new__( + cls, v_cache: OpenAIEmbeddings, cache: Optional[str] = None + ) -> VectorCache: """Create and return a new object. See help(type) for accurate signature.""" -def embedding_server(function): ... +class EmbeddingServer(object): + def run(self, port: int, host: Optional[str] = None) -> None: + """ + Run the embedding server in the foreground until it's stopped. + + Arguments: + port (int): Port to listen on. + host (str, optional): Host interface to bind to. Defaults to None. + + Returns: + None: + """ + + def start(self, port: int, host: Optional[str] = None) -> RunningEmbeddingServer: + """ + Start the embedding server in the background and return a handle. + + Arguments: + port (int): Port to listen on. + host (str, optional): Host interface to bind to. Defaults to None. + + Returns: + RunningEmbeddingServer: handle to stop the server. + """ + +class RunningEmbeddingServer(object): + def __enter__(self): ... + def __exit__(self, _exc_type, _exc_val, _exc_tb): ... + def stop(self) -> None: + """ + Stop the running embedding server. + + Returns: + None: + """ + +def embedding_server(function: Callable[[str], list[float]]) -> EmbeddingServer: + """ + Wrap a Python callable so it can be served as an OpenAI-compatible + embedding endpoint via `EmbeddingServer.serve(...)`. + + Arguments: + function (Callable[[str], list[float]]): A callable that maps a text input to its embedding vector. 
+ + Returns: + EmbeddingServer: + """ diff --git a/python/scripts/gen-stubs.py b/python/scripts/gen-stubs.py index 870648aec6..7b91a78b6f 100755 --- a/python/scripts/gen-stubs.py +++ b/python/scripts/gen-stubs.py @@ -10,13 +10,17 @@ "from raphtory.vectors import *", "from raphtory.node_state import *", "from raphtory.graphql import *", + "from raphtory.gql import *", "from raphtory.typing import *", "import numpy as np", "from numpy.typing import NDArray", "from datetime import datetime", + "import pandas", "from pandas import DataFrame", + "import pyarrow # type: ignore[import-untyped]", "from pyarrow import DataType # type: ignore[import-untyped]", "from os import PathLike", + "from decimal import Decimal", "import networkx as nx # type: ignore", "import pyvis # type: ignore", "from raphtory.iterables import *", diff --git a/python/tests/test_base_install/test_graphql/misc/test_latest.py b/python/tests/test_base_install/test_graphql/misc/test_latest.py index 218a535e4e..ee58148aed 100644 --- a/python/tests/test_base_install/test_graphql/misc/test_latest.py +++ b/python/tests/test_base_install/test_graphql/misc/test_latest.py @@ -117,14 +117,14 @@ def test_latest_and_active(): "latest": { "list": [ { - "id": ["1", "2"], + "id": [1, 2], "history": { "list": [{"timestamp": 3, "eventId": 2}] }, }, - {"id": ["1", "3"], "history": {"list": []}}, + {"id": [1, 3], "history": {"list": []}}, { - "id": ["1", "4"], + "id": [1, 4], "history": { "list": [{"timestamp": 3, "eventId": 5}] }, @@ -139,7 +139,7 @@ def test_latest_and_active(): "latest": { "list": [ { - "id": ["1", "2"], + "id": [1, 2], "history": { "list": [{"timestamp": 3, "eventId": 2}] }, @@ -152,7 +152,7 @@ def test_latest_and_active(): "name": "3", "edges": { "latest": { - "list": [{"id": ["1", "3"], "history": {"list": []}}] + "list": [{"id": [1, 3], "history": {"list": []}}] } }, }, @@ -162,7 +162,7 @@ def test_latest_and_active(): "latest": { "list": [ { - "id": ["1", "4"], + "id": [1, 4], "history": { 
"list": [{"timestamp": 3, "eventId": 5}] }, @@ -202,12 +202,12 @@ def test_latest_and_active(): "latest": { "list": [ { - "id": ["1", "2"], + "id": [1, 2], "history": {"list": [{"timestamp": 3, "eventId": 2}]}, }, - {"id": ["1", "3"], "history": {"list": []}}, + {"id": [1, 3], "history": {"list": []}}, { - "id": ["1", "4"], + "id": [1, 4], "history": {"list": [{"timestamp": 3, "eventId": 5}]}, }, ] diff --git a/python/tests/test_base_install/test_graphql/test_apply_views.py b/python/tests/test_base_install/test_graphql/test_apply_views.py index 91c63d6639..298b98c73a 100644 --- a/python/tests/test_base_install/test_graphql/test_apply_views.py +++ b/python/tests/test_base_install/test_graphql/test_apply_views.py @@ -3215,7 +3215,7 @@ def test_valid_graph(): correct = { "graph": { "applyViews": { - "edges": {"list": [{"id": ["6", "7"], "latestTime": {"timestamp": 5}}]} + "edges": {"list": [{"id": [6, 7], "latestTime": {"timestamp": 5}}]} } } } diff --git a/python/tests/test_base_install/test_graphql/test_gql_graph_surface.py b/python/tests/test_base_install/test_graphql/test_gql_graph_surface.py new file mode 100644 index 0000000000..3b9815cbc2 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_graph_surface.py @@ -0,0 +1,370 @@ +"""Tests for `Graph`-level fields that previously had no GraphQL coverage: + +- `countEdges`, `countNodes`, `countTemporalEdges` +- `hasNode`, `hasEdge` (+ optional `layer:` arg on `hasEdge`) +- `earliestEdgeTime`, `latestEdgeTime` +- `uniqueLayers` +- `sharedNeighbours` + +Each field is tested under a combination of base / window / layer views to +exercise the composition plumbing, not just the field itself. + +`searchEdges` is left untested here — it's marked experimental in the schema +and requires an index-creation path that these fixtures don't exercise. 
+""" + +from utils import run_group_graphql_test +from raphtory import Graph + + +def create_graph() -> Graph: + graph = Graph() + + # Nodes with a node_type so `typeFilter` paths could be reused + graph.add_node(10, "A", node_type="person") + graph.add_node(10, "B", node_type="person") + graph.add_node(15, "C", node_type="org") + graph.add_node(40, "D", node_type="org") + + # Edges across two layers, including a self-loop and a reverse edge + graph.add_edge(10, "A", "B", properties={"weight": 1.0}, layer="layer1") + graph.add_edge(20, "A", "B", properties={"weight": 2.0}, layer="layer1") + graph.add_edge(30, "A", "B", properties={"weight": 3.0}, layer="layer2") + graph.add_edge(15, "A", "C", layer="layer1") + graph.add_edge(25, "A", "C", layer="layer2") + graph.add_edge(40, "C", "D", layer="layer1") + graph.add_edge(50, "B", "A", layer="layer2") + graph.add_edge(25, "A", "A", layer="layer1") # self-loop + + return graph + + +def test_graph_counts(): + """`countNodes`, `countEdges`, `countTemporalEdges` under base / window / + layer / layer+window views.""" + graph = create_graph() + queries_and_expected = [] + + # base: 4 nodes, 5 unique edges (AB, AC, CD, BA, AA), 8 temporal edge events + query = """ + { + graph(path: "g") { + countNodes + countEdges + countTemporalEdges + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "countNodes": 4, + "countEdges": 5, + "countTemporalEdges": 8, + } + }, + ) + ) + + # windowed [10, 30): drops t=30, 40, 50, keeps t=10..25. + # nodes visible: A, B, C (D not yet). edges: AB@layer1, AC@layer1, AC@layer2, AA@layer1 => 3 unique edges (AB, AC, AA). 
+ # temporal edges in window: AB@10, AB@20, AC@15, AC@25, AA@25 => 5 + query = """ + { + graph(path: "g") { + window(start: 10, end: 30) { + countNodes + countEdges + countTemporalEdges + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "countNodes": 3, + "countEdges": 3, + "countTemporalEdges": 5, + } + } + }, + ) + ) + + # layer(layer1) only: edges AB (2 updates), AC, CD, AA => 4 unique, 5 temporal + query = """ + { + graph(path: "g") { + layer(name: "layer1") { + countNodes + countEdges + countTemporalEdges + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "countNodes": 4, + "countEdges": 4, + "countTemporalEdges": 5, + } + } + }, + ) + ) + + # layer(layer1) + window [10, 30): AB@10, AB@20, AC@15, AA@25 => 3 unique, 4 temporal + query = """ + { + graph(path: "g") { + layer(name: "layer1") { + window(start: 10, end: 30) { + countNodes + countEdges + countTemporalEdges + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "window": { + "countNodes": 3, + "countEdges": 3, + "countTemporalEdges": 4, + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_has_node_and_has_edge(): + """`hasNode` / `hasEdge` under base / window / layer views. + + `hasEdge` also accepts an optional `layer:` arg. 
+ """ + graph = create_graph() + queries_and_expected = [] + + # base: all exist + query = """ + { + graph(path: "g") { + hasA: hasNode(name: "A") + hasZ: hasNode(name: "Z") + edgeAB: hasEdge(src: "A", dst: "B") + edgeBA: hasEdge(src: "B", dst: "A") + edgeAD: hasEdge(src: "A", dst: "D") + edgeAB_layer1: hasEdge(src: "A", dst: "B", layer: "layer1") + edgeAB_layer2: hasEdge(src: "A", dst: "B", layer: "layer2") + edgeBA_layer1: hasEdge(src: "B", dst: "A", layer: "layer1") + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "hasA": True, + "hasZ": False, + "edgeAB": True, + "edgeBA": True, + "edgeAD": False, + "edgeAB_layer1": True, + "edgeAB_layer2": True, + "edgeBA_layer1": False, # B->A only exists on layer2 + } + }, + ) + ) + + # windowed [10, 30): D not yet present, edge CD not yet either + query = """ + { + graph(path: "g") { + window(start: 10, end: 30) { + hasD: hasNode(name: "D") + edgeCD: hasEdge(src: "C", dst: "D") + edgeAB: hasEdge(src: "A", dst: "B") + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "hasD": False, + "edgeCD": False, + "edgeAB": True, + } + } + }, + ) + ) + + # layer(layer2): edge AB exists, BA exists on layer2, CD doesn't (layer1 only) + query = """ + { + graph(path: "g") { + layer(name: "layer2") { + edgeAB: hasEdge(src: "A", dst: "B") + edgeBA: hasEdge(src: "B", dst: "A") + edgeCD: hasEdge(src: "C", dst: "D") + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "edgeAB": True, + "edgeBA": True, + "edgeCD": False, + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_edge_time_bounds_and_unique_layers(): + """`earliestEdgeTime`, `latestEdgeTime`, `uniqueLayers` base + window + layer.""" + graph = create_graph() + queries_and_expected = [] + + # base: edges from t=10 to t=50; layers layer1 + layer2 + query = """ + { + graph(path: "g") { + earliestEdgeTime { timestamp } + latestEdgeTime { 
timestamp } + uniqueLayers + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "earliestEdgeTime": {"timestamp": 10}, + "latestEdgeTime": {"timestamp": 50}, + "uniqueLayers": ["layer1", "layer2"], + } + }, + ) + ) + + # windowed: edges from t=15 to t=25 only + query = """ + { + graph(path: "g") { + window(start: 15, end: 30) { + earliestEdgeTime { timestamp } + latestEdgeTime { timestamp } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "earliestEdgeTime": {"timestamp": 15}, + "latestEdgeTime": {"timestamp": 25}, + } + } + }, + ) + ) + + # layer(layer2): edges at t=25, 30, 50 + query = """ + { + graph(path: "g") { + layer(name: "layer2") { + earliestEdgeTime { timestamp } + latestEdgeTime { timestamp } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "earliestEdgeTime": {"timestamp": 25}, + "latestEdgeTime": {"timestamp": 50}, + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_shared_neighbours(): + """`sharedNeighbours` returns the intersection of neighbour sets.""" + graph = create_graph() + queries_and_expected = [] + + # A's neighbours (undirected): {A (self-loop), B, C} + # B's neighbours: {A} -> shared(A,B) = {A} + # C's neighbours: {A, D} -> shared(A,C) = {A}, shared(B,C) = {A} + query = """ + { + graph(path: "g") { + AB: sharedNeighbours(selectedNodes: ["A", "B"]) { name } + AC: sharedNeighbours(selectedNodes: ["A", "C"]) { name } + BC: sharedNeighbours(selectedNodes: ["B", "C"]) { name } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "AB": [{"name": "A"}], + "AC": [{"name": "A"}], + "BC": [{"name": "A"}], + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph, sort_output=True) diff --git a/python/tests/test_base_install/test_graphql/test_gql_graph_type.py b/python/tests/test_base_install/test_graphql/test_gql_graph_type.py new file mode 100644 index
0000000000..a9f9463b60 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_graph_type.py @@ -0,0 +1,273 @@ +"""`graph(path:, graphType:)` accepts an optional `graphType` argument that +re-interprets the stored graph at query time. + +- `EVENT` semantics: each update is a point-in-time event; windows only see + updates whose timestamps fall inside them. +- `PERSISTENT` semantics: values carry forward until overwritten or deleted; + an edge added at t=1 is visible in a window starting at t=5. + +These tests confirm both forms of override work both ways, and that omitting +the argument preserves the graph's native type. +""" + +import json +import tempfile + +from raphtory import Graph, PersistentGraph +from raphtory.graphql import GraphServer + +from utils import PORT + + +def _query(server, q: str) -> dict: + response = server.get_client().query(q) + return json.loads(response) if isinstance(response, str) else response + + +def test_event_graph_default_uses_event_semantics(): + """Without `graphType`, an event-stored graph keeps event semantics — a + window after the addition event sees no edge.""" + work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + graph(path: "g") { + window(start: 5, end: 10) { + hasEdge(src: "a", dst: "b") + } + } + } + """, + ) + assert result["graph"]["window"]["hasEdge"] is False + + +def test_event_graph_read_as_persistent_carries_value_forward(): + """Reading an event graph through `graphType: PERSISTENT` makes the edge + visible in a window that starts after the addition event.""" + work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + graph(path: "g", graphType: PERSISTENT) { + window(start: 
5, end: 10) { + hasEdge(src: "a", dst: "b") + } + } + } + """, + ) + assert result["graph"]["window"]["hasEdge"] is True + + +def test_persistent_graph_default_carries_value_forward(): + """Without `graphType`, a persistent-stored graph keeps persistent + semantics — the edge is alive in a window after the add.""" + work_dir = tempfile.mkdtemp() + g = PersistentGraph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + graph(path: "g") { + window(start: 5, end: 10) { + hasEdge(src: "a", dst: "b") + } + } + } + """, + ) + assert result["graph"]["window"]["hasEdge"] is True + + +def test_persistent_graph_read_as_event_drops_carried_values(): + """Reading a persistent graph through `graphType: EVENT` makes the edge + invisible in a window that starts after the addition event.""" + work_dir = tempfile.mkdtemp() + g = PersistentGraph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + graph(path: "g", graphType: EVENT) { + window(start: 5, end: 10) { + hasEdge(src: "a", dst: "b") + } + } + } + """, + ) + assert result["graph"]["window"]["hasEdge"] is False + + +def test_mutable_event_graph_default_uses_event_semantics(): + """`updateGraph(path).graph` without `graphType` keeps event semantics.""" + work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + updateGraph(path: "g") { + graph { + window(start: 5, end: 10) { hasEdge(src: "a", dst: "b") } + } + } + } + """, + ) + assert result["updateGraph"]["graph"]["window"]["hasEdge"] is False + + +def test_mutable_event_graph_read_as_persistent_carries_value_forward(): + """`updateGraph(path).graph(graphType: PERSISTENT)` 
re-interprets an + event-stored graph through persistent semantics.""" + work_dir = tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + updateGraph(path: "g") { + graph(graphType: PERSISTENT) { + window(start: 5, end: 10) { hasEdge(src: "a", dst: "b") } + } + } + } + """, + ) + assert result["updateGraph"]["graph"]["window"]["hasEdge"] is True + + +def test_mutable_persistent_graph_default_carries_value_forward(): + """`updateGraph(path).graph` on a persistent graph keeps persistent + semantics by default.""" + work_dir = tempfile.mkdtemp() + g = PersistentGraph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + updateGraph(path: "g") { + graph { + window(start: 5, end: 10) { hasEdge(src: "a", dst: "b") } + } + } + } + """, + ) + assert result["updateGraph"]["graph"]["window"]["hasEdge"] is True + + +def test_mutable_persistent_graph_read_as_event_drops_carried_values(): + """`updateGraph(path).graph(graphType: EVENT)` re-interprets a + persistent graph through event semantics.""" + work_dir = tempfile.mkdtemp() + g = PersistentGraph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + updateGraph(path: "g") { + graph(graphType: EVENT) { + window(start: 5, end: 10) { hasEdge(src: "a", dst: "b") } + } + } + } + """, + ) + assert result["updateGraph"]["graph"]["window"]["hasEdge"] is False + + +def test_mutable_graph_reads_pending_mutations_through_override(): + """Mutate via `updateGraph(path)`, then read back via the graph accessor + with a `graphType` override — both the existing data and the new edge + should be visible under the chosen semantics.""" + work_dir = 
tempfile.mkdtemp() + g = Graph() + g.add_edge(1, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + updateGraph(path: "g") { + addEdge(time: 2, src: "b", dst: "c") { success } + asPersistent: graph(graphType: PERSISTENT) { + window(start: 5, end: 10) { + abEdge: hasEdge(src: "a", dst: "b") + bcEdge: hasEdge(src: "b", dst: "c") + } + } + } + } + """, + ) + win = result["updateGraph"]["asPersistent"]["window"] + assert win["abEdge"] is True + assert win["bcEdge"] is True + + +def test_persistent_deletes_visible_via_persistent_view(): + """A delete event in a persistent graph propagates: a window after the + deletion shows the edge as gone under persistent semantics.""" + work_dir = tempfile.mkdtemp() + g = PersistentGraph() + g.add_edge(1, "a", "b") + g.delete_edge(5, "a", "b") + with GraphServer(work_dir).start(PORT) as server: + server.get_client().send_graph(path="g", graph=g) + + result = _query( + server, + """ + { + before: graph(path: "g") { + window(start: 2, end: 4) { hasEdge(src: "a", dst: "b") } + } + after: graph(path: "g") { + window(start: 6, end: 10) { hasEdge(src: "a", dst: "b") } + } + } + """, + ) + assert result["before"]["window"]["hasEdge"] is True + assert result["after"]["window"]["hasEdge"] is False diff --git a/python/tests/test_base_install/test_graphql/test_gql_misc_surface.py b/python/tests/test_base_install/test_graphql/test_gql_misc_surface.py new file mode 100644 index 0000000000..ca5ee1a4a8 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_misc_surface.py @@ -0,0 +1,350 @@ +"""Tests for smaller GraphQL surface fields that previously had no coverage: + +- `Nodes.ids` +- `Edges.explode`, `Edges.explodeLayers` +- `PathFromNode.ids` +- `History.isEmpty` +- `TemporalProperty.orderedDedupe` +- `MetaGraph.nodeCount`, `MetaGraph.edgeCount`, `QueryRoot.graphMetadata` +""" + +from utils import run_group_graphql_test 
+from raphtory import Graph + + +def create_graph() -> Graph: + graph = Graph() + graph.add_node(10, "A", node_type="person") + graph.add_node(10, "B", node_type="person") + graph.add_node(15, "C", node_type="org") + graph.add_node(40, "D", node_type="org") + + graph.add_edge(10, "A", "B", properties={"weight": 1.0}, layer="layer1") + graph.add_edge(20, "A", "B", properties={"weight": 2.0}, layer="layer1") + graph.add_edge(30, "A", "B", properties={"weight": 3.0}, layer="layer2") + graph.add_edge(15, "A", "C", layer="layer1") + graph.add_edge(25, "A", "C", layer="layer2") + graph.add_edge(40, "C", "D", layer="layer1") + graph.add_edge(50, "B", "A", layer="layer2") + graph.add_edge(25, "A", "A", layer="layer1") + + return graph + + +def test_nodes_ids(): + """`nodes.ids` on base / window / layer views.""" + graph = create_graph() + queries_and_expected = [] + + # base: all 4 nodes present + query = """{ graph(path: "g") { nodes { ids } } }""" + queries_and_expected.append( + (query, {"graph": {"nodes": {"ids": ["A", "B", "C", "D"]}}}) + ) + + # window [10, 25): D (added at 40) excluded + query = """{ graph(path: "g") { window(start: 10, end: 25) { nodes { ids } } } }""" + queries_and_expected.append( + ( + query, + {"graph": {"window": {"nodes": {"ids": ["A", "B", "C"]}}}}, + ) + ) + + # layer(layer2): D only has a layer1 edge (C->D) but is still present + # because base-layer (non-layered) node events are always included in any + # layer view — D was added via `add_node(40, "D", ...)` with no layer arg. 
+ query = """{ graph(path: "g") { layer(name: "layer2") { nodes { ids } } } }""" + queries_and_expected.append( + ( + query, + {"graph": {"layer": {"nodes": {"ids": ["A", "B", "C", "D"]}}}}, + ) + ) + + run_group_graphql_test(queries_and_expected, graph, sort_output=True) + + +def test_edges_explode_and_explode_layers(): + """`edges.explode` / `explodeLayers` — collection-level explosion.""" + graph = create_graph() + queries_and_expected = [] + + # Restrict to layer(layer1) + window [10, 25) so the output stays small: + # edges in scope: A->B@10, A->B@20, A->C@15. 3 explode events. + # explodeLayers: one per (edge, layer) pair => 2 entries (A->B on layer1, A->C on layer1). + query = """ + { + graph(path: "g") { + layer(name: "layer1") { + window(start: 10, end: 25) { + edges { + explode { list { src { name } dst { name } time { timestamp } layerName } } + explodeLayers { list { src { name } dst { name } layerName } } + } + } + } + } + } + """ + expected = { + "graph": { + "layer": { + "window": { + "edges": { + "explode": { + "list": [ + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "time": {"timestamp": 10}, + "layerName": "layer1", + }, + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "time": {"timestamp": 20}, + "layerName": "layer1", + }, + { + "src": {"name": "A"}, + "dst": {"name": "C"}, + "time": {"timestamp": 15}, + "layerName": "layer1", + }, + ] + }, + "explodeLayers": { + "list": [ + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "layerName": "layer1", + }, + { + "src": {"name": "A"}, + "dst": {"name": "C"}, + "layerName": "layer1", + }, + ] + }, + } + } + } + } + } + queries_and_expected.append((query, expected)) + + run_group_graphql_test(queries_and_expected, graph, sort_output=True) + + +def test_path_from_node_ids(): + """`pathFromNode.ids` via `neighbours` / `inNeighbours` / `outNeighbours`.""" + graph = create_graph() + queries_and_expected = [] + + # A's neighbours (undirected): B, C, A (self-loop). 
+ # A's outNeighbours: B, C, A + # A's inNeighbours: A, B + query = """ + { + graph(path: "g") { + node(name: "A") { + neighbours { ids } + outNeighbours { ids } + inNeighbours { ids } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "node": { + "neighbours": {"ids": ["A", "B", "C"]}, + "outNeighbours": {"ids": ["A", "B", "C"]}, + "inNeighbours": {"ids": ["A", "B"]}, + } + } + }, + ) + ) + + # layer(layer2) changes A's neighbourhood: A->B, A->C, B->A are the only + # layer2 edges touching A => neighbours={B, C}, outNeighbours={B, C}, inNeighbours={B}. + query = """ + { + graph(path: "g") { + layer(name: "layer2") { + node(name: "A") { + neighbours { ids } + outNeighbours { ids } + inNeighbours { ids } + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "node": { + "neighbours": {"ids": ["B", "C"]}, + "outNeighbours": {"ids": ["B", "C"]}, + "inNeighbours": {"ids": ["B"]}, + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph, sort_output=True) + + +def test_history_is_empty(): + """`history.isEmpty` is true on an empty window, false otherwise.""" + graph = create_graph() + queries_and_expected = [] + + # A has history (created at t=10) + query = """{ graph(path: "g") { node(name: "A") { history { isEmpty } } } }""" + queries_and_expected.append( + (query, {"graph": {"node": {"history": {"isEmpty": False}}}}) + ) + + # Windowing the node (not the graph) keeps the node reachable but empties + # its history => isEmpty = True. + query = """ + { + graph(path: "g") { + node(name: "A") { + window(start: 0, end: 5) { history { isEmpty } } + } + } + } + """ + queries_and_expected.append( + ( + query, + {"graph": {"node": {"window": {"history": {"isEmpty": True}}}}}, + ) + ) + + # Same trick for an edge: pick a window with no A->B updates (the only + # A->B updates are at t=10, 20, 30). 
+ query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { + window(start: 40, end: 45) { history { isEmpty } } + } + } + } + """ + queries_and_expected.append( + ( + query, + {"graph": {"edge": {"window": {"history": {"isEmpty": True}}}}}, + ) + ) + + # Edge A->B's full history is non-empty + query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { history { isEmpty } } + } + } + """ + queries_and_expected.append( + (query, {"graph": {"edge": {"history": {"isEmpty": False}}}}) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_temporal_property_ordered_dedupe(): + """`TemporalProperty.orderedDedupe` — collapses consecutive-equal updates.""" + g = Graph() + # state timeline: a(1), a(2), b(3), a(4), a(5) + # latestTime=True => keeps the latest timestamp of each run: (2,'a'), (3,'b'), (5,'a') + # latestTime=False => keeps the first timestamp of each run: (1,'a'), (3,'b'), (4,'a') + g.add_node(1, "X", properties={"state": "a"}) + g.add_node(2, "X", properties={"state": "a"}) + g.add_node(3, "X", properties={"state": "b"}) + g.add_node(4, "X", properties={"state": "a"}) + g.add_node(5, "X", properties={"state": "a"}) + + queries_and_expected = [] + + query = """ + { + graph(path: "g") { + node(name: "X") { + properties { + temporal { + get(key: "state") { + latest: orderedDedupe(latestTime: true) { time { timestamp } value } + first: orderedDedupe(latestTime: false) { time { timestamp } value } + } + } + } + } + } + } + """ + expected = { + "graph": { + "node": { + "properties": { + "temporal": { + "get": { + "latest": [ + {"time": {"timestamp": 2}, "value": "a"}, + {"time": {"timestamp": 3}, "value": "b"}, + {"time": {"timestamp": 5}, "value": "a"}, + ], + "first": [ + {"time": {"timestamp": 1}, "value": "a"}, + {"time": {"timestamp": 3}, "value": "b"}, + {"time": {"timestamp": 4}, "value": "a"}, + ], + } + } + } + } + } + } + queries_and_expected.append((query, expected)) + + run_group_graphql_test(queries_and_expected, g) + + 
+def test_meta_graph_counts(): + """`graphMetadata` → `MetaGraph.nodeCount` / `edgeCount` report persisted + counts for a stored graph without loading it.""" + graph = create_graph() + queries_and_expected = [] + + query = """{ graphMetadata(path: "g") { nodeCount edgeCount name path } }""" + queries_and_expected.append( + ( + query, + { + "graphMetadata": { + "nodeCount": 4, + "edgeCount": 5, + "name": "g", + "path": "g", + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) diff --git a/python/tests/test_base_install/test_graphql/test_gql_mutation_time_input.py b/python/tests/test_base_install/test_graphql/test_gql_mutation_time_input.py new file mode 100644 index 0000000000..bd45823e21 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_mutation_time_input.py @@ -0,0 +1,296 @@ +"""Mutation arguments that take a time now accept the full `TimeInput` shape: + +- An `Int` (epoch milliseconds). +- An RFC3339 / ISO-8601 datetime string. +- An `{timestamp, eventId}` object. + +These tests verify each form on every mutation surface that takes a time, and +confirm the resulting graph state is identical regardless of which input form +was used. +""" + +import json +import tempfile + +from raphtory import Graph +from raphtory.graphql import GraphServer + +from utils import PORT + + +def _query(server, q: str) -> dict: + response = server.get_client().query(q) + return json.loads(response) if isinstance(response, str) else response + + +def test_add_node_accepts_int_string_and_object_time(): + """`addNode` accepts every `TimeInput` shape — Int, RFC3339 string, and + `{timestamp, eventId}` object. Each insertion lands at its expected + timestamp and is queryable afterwards.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + # Three forms, three different nodes. 
+ client.query(""" + { + updateGraph(path: "g") { + int_form: addNode(time: 100, name: "intNode") { success } + str_form: addNode(time: "1970-01-01T00:00:00.200Z", name: "strNode") { success } + obj_form: addNode(time: {timestamp: 300, eventId: 0}, name: "objNode") { success } + } + } + """) + + # Verify each landed at the expected timestamp. + result = _query( + server, + """ + { + graph(path: "g") { + intNode: node(name: "intNode") { earliestTime { timestamp } } + strNode: node(name: "strNode") { earliestTime { timestamp } } + objNode: node(name: "objNode") { earliestTime { timestamp } } + } + } + """, + ) + assert result["graph"]["intNode"]["earliestTime"]["timestamp"] == 100 + assert result["graph"]["strNode"]["earliestTime"]["timestamp"] == 200 + assert result["graph"]["objNode"]["earliestTime"]["timestamp"] == 300 + + +def test_add_edge_and_delete_edge_accept_time_input_shapes(): + """`addEdge` / `deleteEdge` accept the same forms; verify on a persistent + graph so the deletion is visible via `isValid`.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.new_graph("g", "PERSISTENT") + + client.query(""" + { + updateGraph(path: "g") { + int_add: addEdge(time: 10, src: "a", dst: "b") { success } + str_add: addEdge(time: "1970-01-01T00:00:00.020Z", src: "a", dst: "b") { success } + obj_del: deleteEdge(time: {timestamp: 30, eventId: 0}, src: "a", dst: "b") { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + edge(src: "a", dst: "b") { + earliestTime { timestamp } + latestTime { timestamp } + isValid + } + } + } + """, + ) + edge = result["graph"]["edge"] + assert edge["earliestTime"]["timestamp"] == 10 + # On a persistent graph, the deletion sets the latest valid time. + assert edge["latestTime"]["timestamp"] == 30 + # Edge was deleted at t=30 with no later re-addition, so it's invalid now. 
+ assert edge["isValid"] is False + + +def test_add_properties_accepts_time_input_shapes(): + """`addProperties` (graph-level temporal properties) accepts Int, string, + and object.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + p1: addProperties(t: 100, properties: [{key: "score", value: {i64: 1}}]) + p2: addProperties(t: "1970-01-01T00:00:00.200Z", properties: [{key: "score", value: {i64: 2}}]) + p3: addProperties(t: {timestamp: 300, eventId: 0}, properties: [{key: "score", value: {i64: 3}}]) + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + properties { + temporal { + get(key: "score") { + history { list { timestamp } } + values + } + } + } + } + } + """, + ) + score = result["graph"]["properties"]["temporal"]["get"] + timestamps = [h["timestamp"] for h in score["history"]["list"]] + assert sorted(timestamps) == [100, 200, 300] + assert sorted(score["values"]) == [1, 2, 3] + + +def test_temporal_property_input_accepts_time_input_in_batch(): + """Inside `addNodes` / `addEdges`, the `time` field on each per-update + `TemporalPropertyInput` accepts every `TimeInput` shape.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + addNodes(nodes: [ + { + name: "n", + updates: [ + { time: 10, properties: [{key: "v", value: {i64: 1}}] }, + { time: "1970-01-01T00:00:00.020Z", properties: [{key: "v", value: {i64: 2}}] }, + { time: {timestamp: 30, eventId: 0}, properties: [{key: "v", value: {i64: 3}}] } + ] + } + ]) + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + node(name: "n") { + properties { temporal { get(key: "v") { + history { list { timestamp } 
} + values + } } } + } + } + } + """, + ) + v = result["graph"]["node"]["properties"]["temporal"]["get"] + timestamps = [h["timestamp"] for h in v["history"]["list"]] + assert timestamps == [10, 20, 30] + assert v["values"] == [1, 2, 3] + + +def test_add_edges_batch_accepts_time_input_shapes(): + """`addEdges` is the batch counterpart of `addNodes`. Each per-update + `time` field on its `TemporalPropertyInput` entries accepts every + `TimeInput` shape — Int, RFC3339 string, and `{timestamp, eventId}` object.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + addEdges(edges: [ + { + src: "a", dst: "b", + updates: [ + { time: 10, properties: [{key: "w", value: {i64: 1}}] }, + { time: "1970-01-01T00:00:00.020Z", properties: [{key: "w", value: {i64: 2}}] }, + { time: {timestamp: 30, eventId: 0}, properties: [{key: "w", value: {i64: 3}}] } + ] + } + ]) + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + edge(src: "a", dst: "b") { + properties { temporal { get(key: "w") { + history { list { timestamp } } + values + } } } + } + } + } + """, + ) + w = result["graph"]["edge"]["properties"]["temporal"]["get"] + timestamps = [h["timestamp"] for h in w["history"]["list"]] + assert timestamps == [10, 20, 30] + assert w["values"] == [1, 2, 3] + + +def test_mutable_node_and_edge_add_updates_accept_time_input(): + """`MutableNode.addUpdates` and `MutableEdge.addUpdates` / `delete` accept + every `TimeInput` shape via the `node()` / `edge()` lookups.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.new_graph("g", "PERSISTENT") + + # Seed the node and edge so we can look them up below. 
+ client.query(""" + { + updateGraph(path: "g") { + addNode(time: 0, name: "n") { success } + addEdge(time: 0, src: "a", dst: "b") { success } + } + } + """) + + client.query(""" + { + updateGraph(path: "g") { + node(name: "n") { + i: addUpdates(time: 100, properties: [{key: "v", value: {i64: 1}}]) + s: addUpdates(time: "1970-01-01T00:00:00.200Z", properties: [{key: "v", value: {i64: 2}}]) + o: addUpdates(time: {timestamp: 300, eventId: 0}, properties: [{key: "v", value: {i64: 3}}]) + } + edge(src: "a", dst: "b") { + i: addUpdates(time: 10, properties: [{key: "w", value: {i64: 1}}]) + s: addUpdates(time: "1970-01-01T00:00:00.020Z", properties: [{key: "w", value: {i64: 2}}]) + d: delete(time: {timestamp: 30, eventId: 0}) + } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + node(name: "n") { + properties { temporal { get(key: "v") { values } } } + } + edge(src: "a", dst: "b") { + properties { temporal { get(key: "w") { values } } } + isValid + } + } + } + """, + ) + assert sorted( + result["graph"]["node"]["properties"]["temporal"]["get"]["values"] + ) == [1, 2, 3] + assert sorted( + result["graph"]["edge"]["properties"]["temporal"]["get"]["values"] + ) == [1, 2] + # delete at t=30 with no later re-add → edge is invalid at the latest time. + assert result["graph"]["edge"]["isValid"] is False diff --git a/python/tests/test_base_install/test_graphql/test_gql_node_edge_surface.py b/python/tests/test_base_install/test_graphql/test_gql_node_edge_surface.py new file mode 100644 index 0000000000..0954622849 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_node_edge_surface.py @@ -0,0 +1,553 @@ +"""Tests for `Node` and `Edge` fields that previously had no GraphQL coverage. 
+ +Node fields covered: +- `firstUpdate`, `lastUpdate` +- `edgeHistoryCount` +- `inDegree`, `outDegree` +- `inEdges`, `outEdges` + +Edge fields covered: +- `firstUpdate`, `lastUpdate` +- `layerNames` +- `layerName` (with error case) +- `explode`, `explodeLayers` +- `isValid`, `isSelfLoop` +- `nbr` on an exploded edge + +All tested under base + window + layer composition where applicable. +""" + +from utils import run_group_graphql_test, run_graphql_error_test_contains +from raphtory import Graph + + +def create_graph() -> Graph: + graph = Graph() + + graph.add_node(10, "A", node_type="person") + graph.add_node(10, "B", node_type="person") + graph.add_node(15, "C", node_type="org") + graph.add_node(40, "D", node_type="org") + + graph.add_edge(10, "A", "B", properties={"weight": 1.0}, layer="layer1") + graph.add_edge(20, "A", "B", properties={"weight": 2.0}, layer="layer1") + graph.add_edge(30, "A", "B", properties={"weight": 3.0}, layer="layer2") + graph.add_edge(15, "A", "C", layer="layer1") + graph.add_edge(25, "A", "C", layer="layer2") + graph.add_edge(40, "C", "D", layer="layer1") + graph.add_edge(50, "B", "A", layer="layer2") + graph.add_edge(25, "A", "A", layer="layer1") # self-loop + + return graph + + +def test_node_update_times_and_edge_history_count(): + """`firstUpdate`, `lastUpdate`, `edgeHistoryCount` under base / window / layer.""" + graph = create_graph() + queries_and_expected = [] + + # Base: A has events from t=10 (add_node + A->B) to t=50 (B->A). + # Edge events touching A: A->B @10, @20, @30; A->C @15, @25; A->A @25; B->A @50 => 7. + query = """ + { + graph(path: "g") { + node(name: "A") { + firstUpdate { timestamp } + lastUpdate { timestamp } + edgeHistoryCount + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "node": { + "firstUpdate": {"timestamp": 10}, + "lastUpdate": {"timestamp": 50}, + "edgeHistoryCount": 7, + } + } + }, + ) + ) + + # Windowed [15, 40): first event for A is at 15 (A->C), last is at 30 (A->B). 
+ # Events touching A in window: A->B@20, A->B@30, A->C@15, A->C@25, A->A@25 => 5. + query = """ + { + graph(path: "g") { + window(start: 15, end: 40) { + node(name: "A") { + firstUpdate { timestamp } + lastUpdate { timestamp } + edgeHistoryCount + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "node": { + "firstUpdate": {"timestamp": 15}, + "lastUpdate": {"timestamp": 30}, + "edgeHistoryCount": 5, + } + } + } + }, + ) + ) + + # layer(layer2) on A: node events (add_node @ t=10) aren't layer-scoped so + # firstUpdate still sees t=10. Edge events on layer2 touching A are at 25, + # 30, 50 => lastUpdate=50, edgeHistoryCount=3. + query = """ + { + graph(path: "g") { + layer(name: "layer2") { + node(name: "A") { + firstUpdate { timestamp } + lastUpdate { timestamp } + edgeHistoryCount + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "node": { + "firstUpdate": {"timestamp": 10}, + "lastUpdate": {"timestamp": 50}, + "edgeHistoryCount": 3, + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_node_directed_degrees_and_edges(): + """`inDegree`, `outDegree`, `inEdges`, `outEdges` under base / window / layer.""" + graph = create_graph() + queries_and_expected = [] + + # Base on A: + # out-edges: A->B, A->C, A->A => outDegree=3 + # in-edges: A->A, B->A => inDegree=2 + query = """ + { + graph(path: "g") { + node(name: "A") { + inDegree + outDegree + inEdges { list { src { name } dst { name } } } + outEdges { list { src { name } dst { name } } } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "node": { + "inDegree": 2, + "outDegree": 3, + "inEdges": { + "list": [ + {"src": {"name": "A"}, "dst": {"name": "A"}}, + {"src": {"name": "B"}, "dst": {"name": "A"}}, + ] + }, + "outEdges": { + "list": [ + {"src": {"name": "A"}, "dst": {"name": "B"}}, + {"src": {"name": "A"}, "dst": {"name": "C"}}, + {"src": {"name": 
"A"}, "dst": {"name": "A"}}, + ] + }, + } + } + }, + ) + ) + + # layer(layer2): only A->B@30, A->C@25, B->A@50 touch A. + # outEdges(A): A->B, A->C (no A->A since self-loop is layer1) + # inEdges(A): B->A + query = """ + { + graph(path: "g") { + layer(name: "layer2") { + node(name: "A") { + inDegree + outDegree + inEdges { list { src { name } dst { name } } } + outEdges { list { src { name } dst { name } } } + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "node": { + "inDegree": 1, + "outDegree": 2, + "inEdges": { + "list": [ + {"src": {"name": "B"}, "dst": {"name": "A"}}, + ] + }, + "outEdges": { + "list": [ + {"src": {"name": "A"}, "dst": {"name": "B"}}, + {"src": {"name": "A"}, "dst": {"name": "C"}}, + ] + }, + } + } + } + }, + ) + ) + + # windowed [10, 30): A->A@25 + A->B@10, A->B@20 + A->C@15 => A has 3 out-edges (to A, B, C), 1 in-edge (A->A) + query = """ + { + graph(path: "g") { + window(start: 10, end: 30) { + node(name: "A") { + inDegree + outDegree + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "node": { + "inDegree": 1, + "outDegree": 3, + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph, sort_output=True) + + +def test_edge_update_times(): + """`firstUpdate` / `lastUpdate` on edges under base / window / layer.""" + graph = create_graph() + queries_and_expected = [] + + # A->B: base updates at 10, 20, 30. first=10, last=30. + query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { + firstUpdate { timestamp } + lastUpdate { timestamp } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "edge": { + "firstUpdate": {"timestamp": 10}, + "lastUpdate": {"timestamp": 30}, + } + } + }, + ) + ) + + # layer(layer1) on A->B: updates at 10 and 20 only. 
+ query = """ + { + graph(path: "g") { + layer(name: "layer1") { + edge(src: "A", dst: "B") { + firstUpdate { timestamp } + lastUpdate { timestamp } + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "layer": { + "edge": { + "firstUpdate": {"timestamp": 10}, + "lastUpdate": {"timestamp": 20}, + } + } + } + }, + ) + ) + + # window [15, 25) on A->B: only update at 20. + query = """ + { + graph(path: "g") { + window(start: 15, end: 25) { + edge(src: "A", dst: "B") { + firstUpdate { timestamp } + lastUpdate { timestamp } + } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "window": { + "edge": { + "firstUpdate": {"timestamp": 20}, + "lastUpdate": {"timestamp": 20}, + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_edge_layers_and_explode(): + """`layerNames`, `layerName`, `explode`, `explodeLayers`, `isSelfLoop`, `isValid`.""" + graph = create_graph() + queries_and_expected = [] + + # A->B spans layer1 + layer2 + query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { + layerNames + isSelfLoop + isValid + explode { list { src { name } dst { name } time { timestamp } layerName } } + explodeLayers { list { layerName } } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "edge": { + "layerNames": ["layer1", "layer2"], + "isSelfLoop": False, + "isValid": True, + "explode": { + "list": [ + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "time": {"timestamp": 10}, + "layerName": "layer1", + }, + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "time": {"timestamp": 20}, + "layerName": "layer1", + }, + { + "src": {"name": "A"}, + "dst": {"name": "B"}, + "time": {"timestamp": 30}, + "layerName": "layer2", + }, + ] + }, + "explodeLayers": { + "list": [ + {"layerName": "layer1"}, + {"layerName": "layer2"}, + ] + }, + } + } + }, + ) + ) + + # A->A self-loop (layer1 only) + query = """ + { + graph(path: "g") { + edge(src: 
"A", dst: "A") { + isSelfLoop + layerNames + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "edge": { + "isSelfLoop": True, + "layerNames": ["layer1"], + } + } + }, + ) + ) + + # `layerName` only works on edges that have been exploded (either fully or + # per-layer). Verified via explodeLayers on a multi-layer edge. + query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { + explodeLayers { list { layerName layerNames } } + } + } + } + """ + queries_and_expected.append( + ( + query, + { + "graph": { + "edge": { + "explodeLayers": { + "list": [ + {"layerName": "layer1", "layerNames": ["layer1"]}, + {"layerName": "layer2", "layerNames": ["layer2"]}, + ] + } + } + } + }, + ) + ) + + run_group_graphql_test(queries_and_expected, graph) + + +def test_edge_layer_name_errors_on_non_exploded_edge(): + """`layerName` errors on any edge that hasn't been exploded — the + single-layer form is only available after `.explode()` or + `.explodeLayers()`.""" + query = """ + { + graph(path: "g") { + edge(src: "A", dst: "B") { + layerName + } + } + } + """ + run_graphql_error_test_contains( + query, ["layer_name function is only available", "exploded"], create_graph() + ) + + +def test_edge_nbr_on_exploded_edge(): + """`nbr` on the exploded form of an out-edge returns `dst`.""" + graph = create_graph() + queries_and_expected = [] + + # Explode A->B and ask for `nbr` on each: each should be B (the other end). + query = """ + { + graph(path: "g") { + node(name: "A") { + outEdges { + list { + explode { list { nbr { name } } } + } + } + } + } + } + """ + # A has three out-edges (A->A, A->B, A->C). nbr of each exploded event is + # the "other" node — for A->A that's A (both ends), for A->B it's B, etc. 
+ # A->A: 1 exploded event -> [A] + # A->B: 3 exploded events -> [B, B, B] + # A->C: 2 exploded events -> [C, C] + expected = { + "graph": { + "node": { + "outEdges": { + "list": [ + { + "explode": { + "list": [{"nbr": {"name": "A"}}], + } + }, + { + "explode": { + "list": [ + {"nbr": {"name": "B"}}, + {"nbr": {"name": "B"}}, + {"nbr": {"name": "B"}}, + ], + } + }, + { + "explode": { + "list": [ + {"nbr": {"name": "C"}}, + {"nbr": {"name": "C"}}, + ], + } + }, + ] + } + } + } + } + queries_and_expected.append((query, expected)) + run_group_graphql_test(queries_and_expected, graph, sort_output=True) diff --git a/python/tests/test_base_install/test_graphql/test_gql_node_id.py b/python/tests/test_base_install/test_graphql/test_gql_node_id.py new file mode 100644 index 0000000000..14b9a82891 --- /dev/null +++ b/python/tests/test_base_install/test_graphql/test_gql_node_id.py @@ -0,0 +1,318 @@ +"""Node-id arguments now accept the full `NodeId` shape: + +- A `String` (e.g. `"alice"`). +- A non-negative `Int` (e.g. `42`). + +These tests exercise both forms across the major lookup, mutation, and +view-transform surfaces, and confirm a graph indexed by integers can be +queried and mutated through the GraphQL server. +""" + +import json +import tempfile + +from raphtory import Graph +from raphtory.graphql import GraphServer + +from utils import PORT + + +def _query(server, q: str) -> dict: + response = server.get_client().query(q) + return json.loads(response) if isinstance(response, str) else response + + +def test_addnode_and_node_lookup_with_integer_ids(): + """A graph with integer node ids can be added and queried via the + GraphQL server. 
Raphtory enforces a single id type per graph, so this + test uses integers throughout.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + a: addNode(time: 1, name: 1) { success } + b: addNode(time: 2, name: 2) { success } + c: addNode(time: 3, name: 42) { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + hasInt: hasNode(name: 1) + hasOther: hasNode(name: 42) + hasMissingInt: hasNode(name: 999) + int_node: node(name: 1) { earliestTime { timestamp } } + int_node2: node(name: 42) { earliestTime { timestamp } } + } + } + """, + ) + graph = result["graph"] + assert graph["hasInt"] is True + assert graph["hasOther"] is True + assert graph["hasMissingInt"] is False + assert graph["int_node"]["earliestTime"]["timestamp"] == 1 + assert graph["int_node2"]["earliestTime"]["timestamp"] == 3 + + +def test_addedge_and_edge_lookup_with_integer_endpoints(): + """Edge mutations and lookups accept integer ids on src/dst.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + e1: addEdge(time: 10, src: 1, dst: 2) { success } + e2: addEdge(time: 20, src: 2, dst: 3) { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + hasIntEdge: hasEdge(src: 1, dst: 2) + hasIntEdge2: hasEdge(src: 2, dst: 3) + hasNoEdge: hasEdge(src: 1, dst: 3) + e1: edge(src: 1, dst: 2) { earliestTime { timestamp } } + e2: edge(src: 2, dst: 3) { earliestTime { timestamp } } + } + } + """, + ) + graph = result["graph"] + assert graph["hasIntEdge"] is True + assert graph["hasIntEdge2"] is True + assert graph["hasNoEdge"] is False + assert graph["e1"]["earliestTime"]["timestamp"] == 10 
+ assert graph["e2"]["earliestTime"]["timestamp"] == 20 + + +def test_view_transforms_with_integer_node_ids(): + """`subgraph`, `excludeNodes`, and `sharedNeighbours` accept integer + node ids.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + # Build a small integer-id graph: 1 → 2, 1 → 3, 4 → 2 (so 1 and 4 share neighbour 2). + client.query(""" + { + updateGraph(path: "g") { + e1: addEdge(time: 1, src: 1, dst: 2) { success } + e2: addEdge(time: 2, src: 1, dst: 3) { success } + e3: addEdge(time: 3, src: 4, dst: 2) { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + sub: subgraph(nodes: [1, 2]) { countNodes } + exclude: excludeNodes(nodes: [3]) { countNodes } + shared: sharedNeighbours(selectedNodes: [1, 4]) { id } + } + } + """, + ) + graph = result["graph"] + assert graph["sub"]["countNodes"] == 2 + assert graph["exclude"]["countNodes"] == 3 # 1, 2, 4 (3 removed) + # `1` and `4` both connect to `2`, so 2 is the shared neighbour. + # Integer-indexed graph → `id` comes back as a number. 
+ shared_ids = sorted(s["id"] for s in graph["shared"]) + assert shared_ids == [2] + + +def test_batch_addnodes_addedges_with_integer_ids(): + """`addNodes` and `addEdges` accept integer ids in `name`/`src`/`dst`.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + addNodes(nodes: [ + { name: 1, updates: [{ time: 1, properties: [{key: "v", value: {i64: 10}}] }] } + { name: 42, updates: [{ time: 2, properties: [{key: "v", value: {i64: 20}}] }] } + ]) + addEdges(edges: [ + { src: 1, dst: 2, updates: [{ time: 3, properties: [{key: "w", value: {f64: 1.5}}] }] } + { src: 2, dst: 42, updates: [{ time: 4, properties: [{key: "w", value: {f64: 2.5}}] }] } + ]) + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + n1: node(name: 1) { properties { temporal { get(key: "v") { values } } } } + n2: node(name: 42) { properties { temporal { get(key: "v") { values } } } } + e1: edge(src: 1, dst: 2) { properties { temporal { get(key: "w") { values } } } } + e2: edge(src: 2, dst: 42) { properties { temporal { get(key: "w") { values } } } } + } + } + """, + ) + graph = result["graph"] + assert graph["n1"]["properties"]["temporal"]["get"]["values"] == [10] + assert graph["n2"]["properties"]["temporal"]["get"]["values"] == [20] + assert graph["e1"]["properties"]["temporal"]["get"]["values"] == [1.5] + assert graph["e2"]["properties"]["temporal"]["get"]["values"] == [2.5] + + +def test_view_transforms_with_string_node_ids(): + """`subgraph`, `excludeNodes`, and `sharedNeighbours` accept string node ids.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + # alice → bob, alice → carol, dave → bob (alice and dave share bob). 
+ client.query(""" + { + updateGraph(path: "g") { + e1: addEdge(time: 1, src: "alice", dst: "bob") { success } + e2: addEdge(time: 2, src: "alice", dst: "carol") { success } + e3: addEdge(time: 3, src: "dave", dst: "bob") { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + sub: subgraph(nodes: ["alice", "bob"]) { countNodes } + exclude: excludeNodes(nodes: ["carol"]) { countNodes } + shared: sharedNeighbours(selectedNodes: ["alice", "dave"]) { id } + } + } + """, + ) + graph = result["graph"] + assert graph["sub"]["countNodes"] == 2 + assert graph["exclude"]["countNodes"] == 3 # alice, bob, dave + shared_ids = sorted(s["id"] for s in graph["shared"]) + assert shared_ids == ["bob"] + + +def test_batch_addnodes_addedges_with_string_ids(): + """`addNodes` and `addEdges` accept string ids in `name`/`src`/`dst`.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + addNodes(nodes: [ + { name: "alice", updates: [{ time: 1, properties: [{key: "v", value: {i64: 10}}] }] } + { name: "bob", updates: [{ time: 2, properties: [{key: "v", value: {i64: 20}}] }] } + ]) + addEdges(edges: [ + { src: "alice", dst: "bob", updates: [{ time: 3, properties: [{key: "w", value: {f64: 1.5}}] }] } + { src: "bob", dst: "carol", updates: [{ time: 4, properties: [{key: "w", value: {f64: 2.5}}] }] } + ]) + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + n1: node(name: "alice") { properties { temporal { get(key: "v") { values } } } } + n2: node(name: "bob") { properties { temporal { get(key: "v") { values } } } } + e1: edge(src: "alice", dst: "bob") { properties { temporal { get(key: "w") { values } } } } + e2: edge(src: "bob", dst: "carol") { properties { temporal { get(key: "w") { values } } } } + } + } + """, + ) + graph = result["graph"] + assert 
graph["n1"]["properties"]["temporal"]["get"]["values"] == [10] + assert graph["n2"]["properties"]["temporal"]["get"]["values"] == [20] + assert graph["e1"]["properties"]["temporal"]["get"]["values"] == [1.5] + assert graph["e2"]["properties"]["temporal"]["get"]["values"] == [2.5] + + +def test_string_ids_remain_unchanged_for_existing_clients(): + """Existing clients passing string node ids continue to work without + modification — the schema change is wire-compatible for strings.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + client.query(""" + { + updateGraph(path: "g") { + addNode(time: 1, name: "alice") { success } + addEdge(time: 2, src: "alice", dst: "bob") { success } + } + } + """) + + result = _query( + server, + """ + { + graph(path: "g") { + node(name: "alice") { earliestTime { timestamp } } + edge(src: "alice", dst: "bob") { earliestTime { timestamp } } + } + } + """, + ) + assert result["graph"]["node"]["earliestTime"]["timestamp"] == 1 + assert result["graph"]["edge"]["earliestTime"]["timestamp"] == 2 + + +def test_negative_integer_rejected(): + """Schema rejects negative integers — `NodeId` only accepts non-negative.""" + work_dir = tempfile.mkdtemp() + with GraphServer(work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=Graph()) + + try: + client.query(""" + { + updateGraph(path: "g") { + addNode(time: 1, name: -1) { success } + } + } + """) + raise AssertionError( + "Expected schema-level rejection for negative integer NodeId" + ) + except Exception as e: + assert "NodeId" in str(e) or "non-negative" in str( + e + ), f"Expected NodeId rejection, got: {e}" diff --git a/python/tests/test_base_install/test_graphql/test_gql_temporal_aggregates.py b/python/tests/test_base_install/test_graphql/test_gql_temporal_aggregates.py index 7084ec3404..628745e629 
100644 --- a/python/tests/test_base_install/test_graphql/test_gql_temporal_aggregates.py +++ b/python/tests/test_base_install/test_graphql/test_gql_temporal_aggregates.py @@ -1,5 +1,9 @@ -from utils import run_group_graphql_test +import json +import tempfile + +from utils import PORT, run_group_graphql_test from raphtory import Graph +from raphtory.graphql import GraphServer def create_graph() -> Graph: @@ -463,3 +467,131 @@ def test_edge_temporal_aggregates_across_layers(): queries_and_expected_outputs.append((query, expected_output)) run_group_graphql_test(queries_and_expected_outputs, graph) + + +def _run_typed_accessors_cases(graph, cases): + """Run queries against a fresh server. + + `cases` is a list of `(query, expected, transform)` where `transform` is + applied to both the response and the expected value before comparison + (needed for fields like `unique` whose ordering is non-deterministic). + """ + tmp_work_dir = tempfile.mkdtemp() + with GraphServer(tmp_work_dir, create_index=True).start(PORT) as server: + client = server.get_client() + client.send_graph(path="g", graph=graph) + for query, expected, transform in cases: + response = client.query(query) + response_dict = ( + json.loads(response) if isinstance(response, str) else response + ) + actual = transform(response_dict) if transform else response_dict + expected_t = transform(expected) if transform else expected + assert actual == expected_t, f"Expected:\n{expected_t}\nGot:\n{actual}" + + +def _sort_unique(path): + """Returns a transform that sorts the `unique` list at the given dict path.""" + + def transform(d): + d = json.loads(json.dumps(d)) # deep copy + cur = d + for step in path: + cur = cur[step] + cur["unique"] = sorted(cur["unique"]) + return d + + return transform + + +def test_temporal_property_typed_accessors(): + """`values`, `latest`, `at`, and `unique` on `TemporalProperty` return + properly typed values (numbers stay numbers, bools stay bools, etc.).""" + graph = create_graph() # 
node "A" with score [10, 20, 30, 40] at t=100..400 + # bool property on the same node so we exercise non-numeric typing too + graph.add_node(100, "A", properties={"flag": True}) + graph.add_node(200, "A", properties={"flag": False}) + graph.add_node(300, "A", properties={"flag": True}) + + cases = [] + path = ["graph", "node", "properties", "temporal", "get"] + + # numeric values stay numeric; `at(t)` returns the latest value at-or-before t + query = """ + { + graph(path: "g") { + node(name: "A") { + properties { + temporal { + get(key: "score") { + values + latest + atEarly: at(t: 50) + atMid: at(t: 250) + atExact: at(t: 200) + atLate: at(t: 1000) + unique + } + } + } + } + } + } + """ + expected = { + "graph": { + "node": { + "properties": { + "temporal": { + "get": { + "values": [10, 20, 30, 40], + "latest": 40, + "atEarly": None, + "atMid": 20, + "atExact": 20, + "atLate": 40, + "unique": [10, 20, 30, 40], + } + } + } + } + } + } + cases.append((query, expected, _sort_unique(path))) + + # bools stay bools through values/latest/unique + query = """ + { + graph(path: "g") { + node(name: "A") { + properties { + temporal { + get(key: "flag") { + values + latest + unique + } + } + } + } + } + } + """ + expected = { + "graph": { + "node": { + "properties": { + "temporal": { + "get": { + "values": [True, False, True], + "latest": True, + "unique": [False, True], + } + } + } + } + } + } + cases.append((query, expected, _sort_unique(path))) + + _run_typed_accessors_cases(graph, cases) diff --git a/python/tests/test_base_install/test_graphql/test_rolling_expanding.py b/python/tests/test_base_install/test_graphql/test_rolling_expanding.py index 62a9b82a2c..1ae7a29005 100644 --- a/python/tests/test_base_install/test_graphql/test_rolling_expanding.py +++ b/python/tests/test_base_install/test_graphql/test_rolling_expanding.py @@ -627,7 +627,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 1, "start": {"timestamp": 1}, "end": {"timestamp": 2}, @@ -638,7 
+638,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 3}, @@ -649,7 +649,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 3}, "end": {"timestamp": 4}, @@ -660,7 +660,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 1, "start": {"timestamp": 4}, "end": {"timestamp": 5}, @@ -671,7 +671,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 0, "start": {"timestamp": 5}, "end": {"timestamp": 6}, @@ -685,7 +685,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 1, "start": {"timestamp": 4}, "end": {"timestamp": 5}, @@ -696,7 +696,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 0, "start": {"timestamp": 5}, "end": {"timestamp": 6}, @@ -712,7 +712,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 3}, @@ -724,7 +724,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 4}, @@ -736,7 +736,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 5}, @@ -748,7 +748,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 6}, @@ -763,7 +763,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 5}, @@ -775,7 +775,7 @@ def test_nodes(): { "page": [ { - "id": "1", + "id": 1, "degree": 2, "start": {"timestamp": 2}, "end": {"timestamp": 6}, @@ -890,7 +890,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 1}, "end": {"timestamp": 2}, @@ -901,7 +901,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 3}, @@ -912,7 +912,7 @@ def test_path(): 
{ "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 3}, "end": {"timestamp": 4}, @@ -923,7 +923,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 0, "start": {"timestamp": 4}, "end": {"timestamp": 5}, @@ -934,7 +934,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 0, "start": {"timestamp": 5}, "end": {"timestamp": 6}, @@ -948,7 +948,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 0, "start": {"timestamp": 4}, "end": {"timestamp": 5}, @@ -959,7 +959,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 0, "start": {"timestamp": 5}, "end": {"timestamp": 6}, @@ -975,7 +975,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 3}, @@ -987,7 +987,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 4}, @@ -999,7 +999,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 5}, @@ -1011,7 +1011,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 6}, @@ -1026,7 +1026,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 5}, @@ -1038,7 +1038,7 @@ def test_path(): { "page": [ { - "id": "2", + "id": 2, "degree": 1, "start": {"timestamp": 2}, "end": {"timestamp": 6}, @@ -1340,7 +1340,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 1}, "end": {"timestamp": 2}, "earliestTime": {"timestamp": 1}, @@ -1350,7 +1350,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 3}, "earliestTime": {"timestamp": 2}, @@ -1360,7 +1360,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 3}, "end": {"timestamp": 
4}, "earliestTime": {"timestamp": 3}, @@ -1370,7 +1370,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 4}, "end": {"timestamp": 5}, "earliestTime": {"timestamp": None}, @@ -1380,7 +1380,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 5}, "end": {"timestamp": 6}, "earliestTime": {"timestamp": None}, @@ -1393,7 +1393,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 4}, "end": {"timestamp": 5}, "earliestTime": {"timestamp": None}, @@ -1403,7 +1403,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 5}, "end": {"timestamp": 6}, "earliestTime": {"timestamp": None}, @@ -1418,7 +1418,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 3}, "earliestTime": {"timestamp": 2}, @@ -1429,7 +1429,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 4}, "earliestTime": {"timestamp": 2}, @@ -1440,7 +1440,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 5}, "earliestTime": {"timestamp": 2}, @@ -1451,7 +1451,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 6}, "earliestTime": {"timestamp": 2}, @@ -1465,7 +1465,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 5}, "earliestTime": {"timestamp": 2}, @@ -1476,7 +1476,7 @@ def test_edges(): { "page": [ { - "id": ["1", "2"], + "id": [1, 2], "start": {"timestamp": 2}, "end": {"timestamp": 6}, "earliestTime": {"timestamp": 2}, diff --git a/python/tests/test_base_install/test_graphql/test_server_flags.py b/python/tests/test_base_install/test_graphql/test_server_flags.py index 7019bf5688..1c80de56f1 100644 --- 
a/python/tests/test_base_install/test_graphql/test_server_flags.py +++ b/python/tests/test_base_install/test_graphql/test_server_flags.py @@ -94,11 +94,16 @@ def test_max_query_complexity(): LIST_QUERIES = [ ("collection (namespaces)", "{ namespaces { list { path } } }"), ("GqlNodes", '{ graph(path: "g") { nodes { list { name } } } }'), + ("GqlNodes.ids", '{ graph(path: "g") { nodes { ids } } }'), ("GqlEdges", '{ graph(path: "g") { edges { list { src { name } } } } }'), ( "GqlPathFromNode", '{ graph(path: "g") { node(name: "ben") { neighbours { list { name } } } } }', ), + ( + "GqlPathFromNode.ids", + '{ graph(path: "g") { node(name: "ben") { neighbours { ids } } } }', + ), ( "GqlHistory", '{ graph(path: "g") { node(name: "ben") { history { list { timestamp } } } } }', diff --git a/python/tests/test_base_install/test_graphql/update_graph/test_edge_updates.py b/python/tests/test_base_install/test_graphql/update_graph/test_edge_updates.py index 2572cf0ed7..f7c0181542 100644 --- a/python/tests/test_base_install/test_graphql/update_graph/test_edge_updates.py +++ b/python/tests/test_base_install/test_graphql/update_graph/test_edge_updates.py @@ -139,5 +139,5 @@ def test_delete(): edge = rg.add_edge(1, "ben", "lucas", layer="colleagues") edge.delete(2, layer="colleagues") g = client.receive_graph("path/to/persistent_graph") - assert g.edge("ben", "hamza").deletions == [(2, 1)] - assert g.edge("ben", "lucas").deletions == [(2, 3)] + assert g.edge("ben", "hamza").deletions.t == [2] + assert g.edge("ben", "lucas").deletions.t == [2] diff --git a/python/tests/test_base_install/test_graphql/update_graph/test_graph_updates.py b/python/tests/test_base_install/test_graphql/update_graph/test_graph_updates.py index 999fba2b18..f7084ad6b9 100644 --- a/python/tests/test_base_install/test_graphql/update_graph/test_graph_updates.py +++ b/python/tests/test_base_install/test_graphql/update_graph/test_graph_updates.py @@ -147,7 +147,7 @@ def test_delete_edge(): rg.delete_edge(2, "ben", 
"hamza") g = client.receive_graph("path/to/event_graph") assert g.edge("ben", "hamza").history.t.collect() == [1] - assert g.edge("ben", "hamza").deletions == [(2, 1)] + assert g.edge("ben", "hamza").deletions.t.collect() == [2] client.new_graph("path/to/persistent_graph", "PERSISTENT") rg = client.remote_graph("path/to/persistent_graph") @@ -156,5 +156,5 @@ def test_delete_edge(): rg.add_edge(1, "ben", "lucas", layer="colleagues") rg.delete_edge(2, "ben", "lucas", layer="colleagues") g = client.receive_graph("path/to/persistent_graph") - assert g.edge("ben", "hamza").deletions == [(2, 1)] - assert g.edge("ben", "lucas").deletions == [(2, 3)] + assert g.edge("ben", "hamza").deletions.t.collect() == [2] + assert g.edge("ben", "lucas").deletions.t.collect() == [2] diff --git a/python/tests/test_base_install/test_loaders/test_load_from_pandas.py b/python/tests/test_base_install/test_loaders/test_load_from_pandas.py index 573c786fa5..87d838a8e6 100644 --- a/python/tests/test_base_install/test_loaders/test_load_from_pandas.py +++ b/python/tests/test_base_install/test_loaders/test_load_from_pandas.py @@ -1891,3 +1891,152 @@ def test_load_nodes_invalid_layer_reference(): with pytest.raises(Exception, match="Invalid layer: nonexistent_layer"): g.layers(["nonexistent_layer"]) + + +# --- Schema loading: Decimal & datetime PropTypes ----------------------------- + + +def test_load_nodes_with_decimal_schema_from_string_column(): + """`schema={"col": PropType.decimal(N)}` casts a string column to Decimal.""" + from decimal import Decimal + + df = pd.DataFrame( + { + "id": ["s1", "s2"], + "time": [1, 2], + "price": ["19.99", "0.50"], + } + ) + g = Graph() + g.load_nodes( + data=df, + id="id", + time="time", + properties=["price"], + schema={"price": PropType.decimal(2)}, + ) + assert g.node("s1").properties["price"] == Decimal("19.99") + assert g.node("s2").properties["price"] == Decimal("0.50") + + +def test_load_nodes_with_decimal_schema_from_arrow_decimal_column(): + """An 
Arrow Decimal128 column maps onto `PropType.decimal(scale)`.""" + from decimal import Decimal + + arr = pa.array( + [Decimal("1.23"), Decimal("4.56")], + type=pa.decimal128(precision=10, scale=2), + ) + df = pd.DataFrame( + { + "id": ["s1", "s2"], + "time": [1, 2], + "price": arr.to_pandas(), + } + ) + g = Graph() + g.load_nodes( + data=df, + id="id", + time="time", + properties=["price"], + schema={"price": PropType.decimal(2)}, + ) + assert g.node("s1").properties["price"] == Decimal("1.23") + assert g.node("s2").properties["price"] == Decimal("4.56") + + +def test_load_edges_with_decimal_schema(): + from decimal import Decimal + + df = pd.DataFrame( + { + "src": ["a", "b"], + "dst": ["b", "c"], + "time": [1, 2], + "weight": ["3.14", "2.71"], + } + ) + g = Graph() + g.load_edges( + df, + time="time", + src="src", + dst="dst", + properties=["weight"], + schema={"weight": PropType.decimal(2)}, + ) + assert g.edge("a", "b").properties["weight"] == Decimal("3.14") + assert g.edge("b", "c").properties["weight"] == Decimal("2.71") + + +def test_load_nodes_with_naive_datetime_schema(): + """`PropType.naive_datetime()` casts a string column to NDTime.""" + df = pd.DataFrame( + { + "id": ["s1", "s2"], + "time": [1, 2], + "ts": ["2024-06-01T12:00:00", "2024-06-02T13:30:00"], + } + ) + g = Graph() + g.load_nodes( + data=df, + id="id", + time="time", + properties=["ts"], + schema={"ts": PropType.naive_datetime()}, + ) + assert g.node("s1").properties["ts"] == datetime.datetime(2024, 6, 1, 12, 0, 0) + assert g.node("s2").properties["ts"] == datetime.datetime(2024, 6, 2, 13, 30, 0) + + +def test_load_nodes_with_aware_datetime_schema(): + """`PropType.datetime()` casts to DTime (timezone-aware UTC).""" + df = pd.DataFrame( + { + "id": ["s1", "s2"], + "time": [1, 2], + "ts": ["2024-06-01T12:00:00+00:00", "2024-06-02T13:30:00+00:00"], + } + ) + g = Graph() + g.load_nodes( + data=df, + id="id", + time="time", + properties=["ts"], + schema={"ts": PropType.datetime()}, + ) + assert 
g.node("s1").properties["ts"] == datetime.datetime( + 2024, 6, 1, 12, 0, 0, tzinfo=datetime.timezone.utc + ) + assert g.node("s2").properties["ts"] == datetime.datetime( + 2024, 6, 2, 13, 30, 0, tzinfo=datetime.timezone.utc + ) + + +def test_load_edges_with_datetime_schema(): + df = pd.DataFrame( + { + "src": ["a", "b"], + "dst": ["b", "c"], + "time": [1, 2], + "scheduled_at": [ + "2024-06-01T09:00:00+00:00", + "2024-06-02T17:00:00+00:00", + ], + } + ) + g = Graph() + g.load_edges( + df, + time="time", + src="src", + dst="dst", + properties=["scheduled_at"], + schema={"scheduled_at": PropType.datetime()}, + ) + assert g.edge("a", "b").properties["scheduled_at"] == datetime.datetime( + 2024, 6, 1, 9, 0, 0, tzinfo=datetime.timezone.utc + ) diff --git a/python/tests/test_base_install/test_props.py b/python/tests/test_base_install/test_props.py index 76db5d61e7..03af3e34be 100644 --- a/python/tests/test_base_install/test_props.py +++ b/python/tests/test_base_install/test_props.py @@ -1,6 +1,7 @@ -from raphtory import Prop +from raphtory import Graph, Prop from utils import expect_unify_error, assert_in_all from decimal import Decimal +from datetime import datetime, timezone import pytest @@ -158,3 +159,127 @@ def test_map_with_nested_list_that_is_heterogeneous_rejected(): } ).dtype() ) + + +def test_aware_datetime(): + dt = datetime(2024, 6, 1, 12, 30, 45, tzinfo=timezone.utc) + p = Prop.aware_datetime(dt) + assert str(p.dtype()) == "DTime" + assert "2024-06-01" in repr(p) + + +def test_aware_datetime_rejects_naive_input(): + """`Prop.aware_datetime` requires tz-aware input — use `naive_datetime` for naive ones.""" + naive = datetime(2024, 6, 1, 12, 30, 45) + with pytest.raises(TypeError): + Prop.aware_datetime(naive) + + +def test_naive_datetime(): + dt = datetime(2024, 6, 1, 12, 30, 45) + p = Prop.naive_datetime(dt) + assert str(p.dtype()) == "NDTime" + assert "2024-06-01" in repr(p) + + +def test_decimal_from_string(): + p = Prop.decimal("1234.5678") + # Decimal 
stores scale; dtype reports it. + assert str(p.dtype()) == "Decimal { scale: 4 }" + + +def test_decimal_from_negative_string(): + p = Prop.decimal("-0.001") + assert str(p.dtype()) == "Decimal { scale: 3 }" + + +def test_decimal_from_string_zero_scale(): + p = Prop.decimal("42") + assert str(p.dtype()) == "Decimal { scale: 0 }" + + +def test_decimal_from_python_decimal(): + p = Prop.decimal(Decimal("99.99")) + assert str(p.dtype()) == "Decimal { scale: 2 }" + + +def test_decimal_from_python_decimal_high_precision(): + """`decimal.Decimal` preserves precision regardless of float limits.""" + p = Prop.decimal(Decimal("1.234567890123456789012345")) + assert str(p.dtype()) == "Decimal { scale: 24 }" + + +def test_decimal_from_int(): + p = Prop.decimal(7) + assert str(p.dtype()) == "Decimal { scale: 0 }" + + +def test_decimal_from_negative_int(): + p = Prop.decimal(-42) + assert str(p.dtype()) == "Decimal { scale: 0 }" + + +def test_decimal_from_large_int(): + p = Prop.decimal(2**62) + assert str(p.dtype()) == "Decimal { scale: 0 }" + + +def test_decimal_from_float(): + p = Prop.decimal(1.5) + assert "Decimal" in str(p.dtype()) + + +def test_decimal_from_negative_float(): + p = Prop.decimal(-3.25) + assert "Decimal" in str(p.dtype()) + + +def test_decimal_rejects_non_numeric_string(): + with pytest.raises(TypeError): + Prop.decimal("not a number") + + +def test_decimal_rejects_unsupported_type(): + with pytest.raises(TypeError): + Prop.decimal([1, 2, 3]) + + +def test_decimal_in_graph_roundtrips(): + """Decimal Props attach to graph entities and are readable back.""" + g = Graph() + g.add_node(1, "n", properties={"price": Prop.decimal("19.99")}) + val = g.node("n").properties.get("price") + assert val == Decimal("19.99") + + +def test_decimal_in_graph_from_int_then_read_back(): + g = Graph() + g.add_node(1, "n", properties={"count": Prop.decimal(42)}) + val = g.node("n").properties.get("count") + assert val == Decimal("42") + + +def 
test_decimal_in_graph_from_float_then_read_back(): + g = Graph() + g.add_node(1, "n", properties={"ratio": Prop.decimal(1.5)}) + val = g.node("n").properties.get("ratio") + assert val == Decimal("1.5") + + +def test_decimal_list_in_graph(): + """Lists of Decimal Props inherit a unified scale.""" + g = Graph() + g.add_node( + 1, + "n", + properties={"prices": Prop.list([Prop.decimal("1.25"), Prop.decimal("2.50")])}, + ) + vals = g.node("n").properties.get("prices") + assert vals == [Decimal("1.25"), Decimal("2.50")] + + +def test_decimal_list_rejects_mixed_scales(): + """Mixing decimal scales in a list errors at unification time.""" + expect_unify_error( + lambda: Prop.list([Prop.decimal("1.25"), Prop.decimal("2.5")]).dtype() + ) diff --git a/python/tests/test_vectors/test_graphql_vectors.py b/python/tests/test_vectors/test_graphql_vectors.py index b0aa5fdd43..6a22c0b35c 100644 --- a/python/tests/test_vectors/test_graphql_vectors.py +++ b/python/tests/test_vectors/test_graphql_vectors.py @@ -94,6 +94,57 @@ def test_upload_graph(): assert_correct_documents(client) +def test_vectorised_graph_window_accepts_time_input_shapes(): + """`VectorisedGraphWindow.{start, end}` accepts every `TimeInput` shape — + Int, RFC3339 string, and `{timestamp, eventId}` object. + + Verifies the schema accepts each form and that all three forms produce + the *same* result for the same time bounds (i.e. they're parsed + equivalently).""" + work_dir = tempfile.TemporaryDirectory() + server = GraphServer(work_dir.name) + with embeddings.start(7340): + with server.start(): + client = RaphtoryClient("http://localhost:1736") + client.new_graph("abb", "EVENT") + rg = client.remote_graph("abb") + setup_graph(rg) + # `model` and `apiBase` point at the mock embedding server above, + # so the model name is just a placeholder identifier. 
+ client.query(""" + { + vectoriseGraph(path: "abb", model: { openAI: { model: "mock-model", apiBase: "http://localhost:7340" } }, nodes: { custom: "{{ name }}" }, edges: { enabled: false }) + } + """) + + def run(window_literal: str): + q = """ + { + vectorisedGraph(path: "abb") { + entitiesBySimilarity(query: "aab", limit: 5, window: %s) { + getDocuments { entity { ... on Node { name } } } + } + } + } + """ % window_literal + return client.query(q) + + # Same time bounds, three different input shapes — all should be + # accepted by the schema and produce identical results. + int_form = run("{ start: 0, end: 1000 }") + str_form = run( + '{ start: "1970-01-01T00:00:00.000Z", end: "1970-01-01T00:00:01.000Z" }' + ) + obj_form = run( + "{ start: {timestamp: 0, eventId: 0}, end: {timestamp: 1000, eventId: 0} }" + ) + + assert int_form == str_form == obj_form, ( + "All three TimeInput shapes should produce identical results " + f"for equivalent time bounds.\nint: {int_form}\nstr: {str_form}\nobj: {obj_form}" + ) + + GRAPH_NAME = "abb" diff --git a/raphtory-api/src/python/prop.rs b/raphtory-api/src/python/prop.rs index d92a599145..0b0e1f4b70 100644 --- a/raphtory-api/src/python/prop.rs +++ b/raphtory-api/src/python/prop.rs @@ -3,6 +3,7 @@ use crate::core::{ storage::arc_str::ArcStr, }; use bigdecimal::BigDecimal; +use chrono::{DateTime, NaiveDateTime, Utc}; use pyo3::{ exceptions::PyTypeError, prelude::*, @@ -112,62 +113,146 @@ pub struct PyProp(pub Prop); #[pymethods] impl PyProp { + /// Construct a `Prop` holding an unsigned 8-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn u8(value: u8) -> Self { PyProp(Prop::U8(value)) } + /// Construct a `Prop` holding an unsigned 16-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. 
+ /// + /// Returns: + /// Prop: #[staticmethod] pub fn u16(value: u16) -> Self { PyProp(Prop::U16(value)) } + /// Construct a `Prop` holding an unsigned 32-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn u32(value: u32) -> Self { PyProp(Prop::U32(value)) } + /// Construct a `Prop` holding an unsigned 64-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn u64(value: u64) -> Self { PyProp(Prop::U64(value)) } + /// Construct a `Prop` holding a signed 32-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn i32(value: i32) -> Self { PyProp(Prop::I32(value)) } + /// Construct a `Prop` holding a signed 64-bit integer. + /// + /// Arguments: + /// value (int): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn i64(value: i64) -> Self { PyProp(Prop::I64(value)) } + /// Construct a `Prop` holding a 32-bit float. + /// + /// Arguments: + /// value (float): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn f32(value: f32) -> Self { PyProp(Prop::F32(value)) } + /// Construct a `Prop` holding a 64-bit float. + /// + /// Arguments: + /// value (float): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn f64(value: f64) -> Self { PyProp(Prop::F64(value)) } + /// Construct a `Prop` holding a string. + /// + /// Arguments: + /// value (str): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn str(value: &str) -> Self { PyProp(Prop::str(value)) } + /// Construct a `Prop` holding a boolean. + /// + /// Arguments: + /// value (bool): the value to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn bool(value: bool) -> Self { PyProp(Prop::Bool(value)) } + /// Construct a `Prop` holding a list of values. 
+ /// + /// Arguments: + /// values (list): the values to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn list(values: &Bound<'_, PyAny>) -> PyResult { let elems: Vec = values.extract()?; Ok(PyProp(Prop::list(elems))) } + /// Construct a `Prop` holding a string-keyed map of values. + /// + /// Arguments: + /// dict (dict[str, Any]): the map to wrap. + /// + /// Returns: + /// Prop: #[staticmethod] pub fn map(dict: Bound<'_, PyDict>) -> PyResult { let items: HashMap = dict.extract()?; @@ -180,6 +265,71 @@ impl PyProp { Ok(PyProp(Prop::Map(Arc::new(map)))) } + /// Construct a `Prop` holding a timezone-aware datetime (stored as UTC). + /// + /// Arguments: + /// value (datetime): a timezone-aware datetime. Use `Prop.naive_datetime` for naive ones. + /// + /// Returns: + /// Prop: + #[staticmethod] + pub fn aware_datetime(value: DateTime) -> Self { + PyProp(Prop::DTime(value)) + } + + /// Construct a `Prop` holding a naive (timezone-unaware) datetime. + /// + /// Arguments: + /// value (datetime): the value to wrap (any tz info is dropped). + /// + /// Returns: + /// Prop: + #[staticmethod] + pub fn naive_datetime(value: NaiveDateTime) -> Self { + PyProp(Prop::NDTime(value)) + } + + /// Construct a `Prop` holding an arbitrary-precision decimal. + /// + /// Arguments: + /// value (Decimal | str | int | float): the value to wrap. Strings must + /// parse as a decimal. Note that floats only have ~15-17 digits of + /// precision — pass a string or `decimal.Decimal` for higher precision. + /// + /// Returns: + /// Prop: + #[staticmethod] + pub fn decimal(value: &Bound<'_, PyAny>) -> PyResult { + let bd = if value.get_type().name()?.contains("Decimal")? { + // decimal.Decimal — go via its str representation for full precision. + let s = value.str()?.to_cow()?.into_owned(); + BigDecimal::from_str(&s) + .map_err(|_| PyTypeError::new_err(format!("Could not convert {s} to Decimal")))? 
+ } else if let Ok(i) = value.extract::() { + BigDecimal::from(i) + } else if let Ok(u) = value.extract::() { + BigDecimal::from(u) + } else if let Ok(f) = value.extract::() { + BigDecimal::try_from(f) + .map_err(|_| PyTypeError::new_err(format!("Could not convert {f} to Decimal")))? + } else if let Ok(s) = value.extract::() { + BigDecimal::from_str(&s) + .map_err(|_| PyTypeError::new_err(format!("Could not convert {s} to Decimal")))? + } else { + return Err(PyTypeError::new_err(format!( + "Could not convert {:?} to Decimal", + value + ))); + }; + let prop = Prop::try_from_bd(bd) + .map_err(|_| PyTypeError::new_err(format!("Decimal too large: {value:?}")))?; + Ok(PyProp(prop)) + } + + /// Returns the `PropType` of the wrapped value. + /// + /// Returns: + /// PropType: pub fn dtype(&self) -> PropType { self.0.dtype() } @@ -268,71 +418,146 @@ pub struct PyPropType(pub PropType); #[pymethods] impl PyPropType { + /// Unsigned 8-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn u8() -> PropType { PropType::U8 } + /// Unsigned 16-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn u16() -> PropType { PropType::U16 } + /// Unsigned 32-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn u32() -> PropType { PropType::U32 } + /// Unsigned 64-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn u64() -> PropType { PropType::U64 } + /// Signed 32-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn i32() -> PropType { PropType::I32 } + /// Signed 64-bit integer type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn i64() -> PropType { PropType::I64 } + /// 32-bit float type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn f32() -> PropType { PropType::F32 } + /// 64-bit float type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn f64() -> PropType { PropType::F64 } + /// String type. 
+ /// + /// Returns: + /// PropType: #[staticmethod] pub fn str() -> PropType { PropType::Str } + /// Boolean type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn bool() -> PropType { PropType::Bool } + /// Naive datetime type (timezone-unaware). + /// + /// Returns: + /// PropType: #[staticmethod] pub fn naive_datetime() -> PropType { PropType::NDTime } + /// Datetime type (timezone-aware). + /// + /// Returns: + /// PropType: #[staticmethod] pub fn datetime() -> PropType { PropType::DTime } + /// Arbitrary-precision decimal type with a fixed scale (number of digits + /// after the decimal point). + /// + /// Arguments: + /// scale (int): the number of digits after the decimal point. + /// + /// Returns: + /// PropType: + #[staticmethod] + pub fn decimal(scale: i64) -> PropType { + PropType::Decimal { scale } + } + + /// List type with a single element type. + /// + /// Arguments: + /// p (PropType): element type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn list(p: PropType) -> PropType { PropType::List(Box::new(p)) } + /// Map type with string keys and typed values. + /// + /// Arguments: + /// hash_map (dict[str, PropType]): mapping from key name to value type. + /// + /// Returns: + /// PropType: #[staticmethod] pub fn map(hash_map: HashMap) -> PropType { PropType::Map(Arc::new(hash_map)) diff --git a/raphtory-api/src/python/timeindex.rs b/raphtory-api/src/python/timeindex.rs index a0bce05212..067bcfe6ed 100644 --- a/raphtory-api/src/python/timeindex.rs +++ b/raphtory-api/src/python/timeindex.rs @@ -165,8 +165,8 @@ fn parse_email_timestamp(timestamp: &str) -> PyResult { }) } -/// Raphtory’s EventTime. -/// Represents a unique timepoint in the graph’s history as (timestamp, event_id). +/// Raphtory's EventTime. +/// Represents a unique timepoint in the graph's history as (timestamp, event_id). /// /// - timestamp: Number of milliseconds since the Unix epoch. /// - event_id: ID used for ordering between equal timestamps. 
@@ -176,6 +176,10 @@ fn parse_email_timestamp(timestamp: &str) -> PyResult { /// EventTime can be converted into a timestamp or a Python datetime, and compared /// either by timestamp (against ints/floats/datetimes/strings), by tuple of (timestamp, event_id), /// or against another EventTime. +/// +/// Arguments: +/// timestamp (int | float | datetime | str): A time input convertible to an EventTime. +/// event_id (int | float | datetime | str | None): Optionally, specify the event id. Defaults to None. #[pyclass(name = "EventTime", module = "raphtory", frozen)] #[derive(Debug, Clone, Copy, Serialize, PartialEq, Ord, PartialOrd, Eq)] pub struct PyEventTime { @@ -367,7 +371,7 @@ impl PyOptionalEventTime { /// Returns the timestamp in milliseconds since the Unix epoch if an EventTime is contained, or else None. /// /// Returns: - /// int | None: Milliseconds since the Unix epoch. + /// Optional[int]: Milliseconds since the Unix epoch. #[getter] pub fn t(&self) -> Option { self.inner.map(|t| t.t()) @@ -376,7 +380,7 @@ impl PyOptionalEventTime { /// Returns the UTC datetime representation of this EventTime's timestamp if an EventTime is contained, or else None. /// /// Returns: - /// datetime | None: The UTC datetime. + /// Optional[datetime]: The UTC datetime. /// /// Raises: /// TimeError: Returns TimeError on timestamp conversion errors (e.g. out-of-range timestamp). @@ -388,7 +392,7 @@ impl PyOptionalEventTime { /// Returns the event id used to order events within the same timestamp if an EventTime is contained, or else None. /// /// Returns: - /// int | None: The event id. + /// Optional[int]: The event id. #[getter] pub fn event_id(&self) -> Option { self.inner.map(|t| t.i()) @@ -413,7 +417,7 @@ impl PyOptionalEventTime { /// Returns the contained EventTime if it exists, or else None. 
/// /// Returns: - /// EventTime | None: + /// Optional[EventTime]: pub fn get_event_time(&self) -> Option { self.inner } @@ -421,7 +425,7 @@ impl PyOptionalEventTime { /// Return this entry as a tuple of (timestamp, event_id), where the timestamp is in milliseconds if an EventTime is contained, or else None. /// /// Returns: - /// tuple[int,int] | None: (timestamp, event_id). + /// Optional[tuple[int, int]]: (timestamp, event_id). #[getter] pub fn as_tuple(&self) -> Option<(i64, usize)> { self.inner.map(|t| t.as_tuple()) diff --git a/raphtory-graphql/schema.graphql b/raphtory-graphql/schema.graphql index 2840a39ffb..825000c451 100644 --- a/raphtory-graphql/schema.graphql +++ b/raphtory-graphql/schema.graphql @@ -42,7 +42,20 @@ type CollectionOfMetaGraph { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [MetaGraph!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [MetaGraph!]! """ Returns a count of collection objects. """ @@ -63,7 +76,20 @@ type CollectionOfNamespace { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Namespace!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Namespace!]! """ Returns a count of collection objects. 
""" @@ -84,7 +110,20 @@ type CollectionOfNamespacedItem { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [NamespacedItem!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [NamespacedItem!]! """ Returns a count of collection objects. """ @@ -128,25 +167,45 @@ type Edge { Errors if any of the layers do not exist. """ - layers(names: [String!]!): Edge! + layers( + """ + Layer names to include. + """ + names: [String!]! + ): Edge! """ Returns a view of Edge containing all layers except the excluded list of names. Errors if any of the layers do not exist. """ - excludeLayers(names: [String!]!): Edge! + excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): Edge! """ Returns a view of Edge containing the specified layer. Errors if any of the layers do not exist. """ - layer(name: String!): Edge! + layer( + """ + Layer name to include. + """ + name: String! + ): Edge! """ Returns a view of Edge containing all layers except the excluded layer specified. Errors if any of the layers do not exist. """ - excludeLayer(name: String!): Edge! + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): Edge! """ Creates a WindowSet with the given window duration and optional step using a rolling window. @@ -158,7 +217,20 @@ type Edge { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): EdgeWindowSet! 
+ rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). + """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): EdgeWindowSet! """ Creates a WindowSet with the given step size using an expanding window. @@ -168,19 +240,45 @@ type Edge { If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): EdgeWindowSet! + expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): EdgeWindowSet! """ Creates a view of the Edge including all events between the specified start (inclusive) and end (exclusive). For persistent graphs, any edge which exists at any point during the window will be included. 
You may want to restrict this to only edges that are present at the end of the window using the is_valid function. """ - window(start: TimeInput!, end: TimeInput!): Edge! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): Edge! """ Creates a view of the Edge including all events at a specified time. """ - at(time: TimeInput!): Edge! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): Edge! """ - Returns a view of the edge at the latest time of the graph. + View of this edge pinned to the graph's latest time — equivalent to + `at(graph.latestTime)`. The edge's properties and metadata show their + most recent values, and (for persistent graphs) validity is evaluated + at that instant. """ latest: Edge! """ @@ -188,7 +286,12 @@ type Edge { This is equivalent to before(time + 1) for Graph and at(time) for PersistentGraph. """ - snapshotAt(time: TimeInput!): Edge! + snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): Edge! """ Creates a view of the Edge including all events that are valid at the latest time. @@ -198,36 +301,82 @@ type Edge { """ Creates a view of the Edge including all events before a specified end (exclusive). """ - before(time: TimeInput!): Edge! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): Edge! """ Creates a view of the Edge including all events after a specified start (exclusive). """ - after(time: TimeInput!): Edge! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! + ): Edge! """ Shrinks both the start and end of the window. """ - shrinkWindow(start: TimeInput!, end: TimeInput!): Edge! + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Edge! """ Set the start of the window. 
""" - shrinkStart(start: TimeInput!): Edge! + shrinkStart( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput! + ): Edge! """ Set the end of the window. """ - shrinkEnd(end: TimeInput!): Edge! + shrinkEnd( + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Edge! """ Takes a specified selection of views and applies them in given order. """ - applyViews(views: [EdgeViewCollection!]!): Edge! + applyViews( + """ + Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result. + """ + views: [EdgeViewCollection!]! + ): Edge! """ Returns the earliest time of an edge. """ earliestTime: EventTime! + """ + The timestamp of the first event in this edge's history (first update, first + deletion, or anything in between). Differs from `earliestTime` in that + `earliestTime` reports when the edge is first *valid*; `firstUpdate` reports + when its history actually begins. + """ firstUpdate: EventTime! """ Returns the latest time of an edge. """ latestTime: EventTime! + """ + The timestamp of the last event in this edge's history (last update, last + deletion, or anything in between). Differs from `latestTime` in that + `latestTime` reports when the edge is last *valid*; `lastUpdate` reports + when its history actually ends. + """ lastUpdate: EventTime! """ Returns the time of an exploded edge. Errors on an unexploded edge. @@ -263,12 +412,11 @@ type Edge { """ nbr: Node! """ - Returns the id of the edge. - - Returns: - list[str]: + Returns the `[src, dst]` id pair of the edge. Each id is a `String` + for string-indexed graphs or a non-negative `Int` for integer-indexed + graphs. """ - id: [String!]! + id: [NodeId!]! """ Returns a view of the properties of the edge. """ @@ -333,18 +481,27 @@ type Edge { Returns: boolean """ isSelfLoop: Boolean! - filter(expr: EdgeFilter!): Edge! 
+ """ + Apply an edge filter in place, returning an edge view whose properties / + metadata / history are restricted to the matching subset. + """ + filter( + """ + Composite edge filter (by property, layer, src/dst, etc.). + """ + expr: EdgeFilter! + ): Edge! } input EdgeAddition { """ - Source node. + Source node id (string or non-negative integer). """ - src: String! + src: NodeId! """ - Destination node. + Destination node id (string or non-negative integer). """ - dst: String! + dst: NodeId! """ Layer. """ @@ -537,6 +694,11 @@ input EdgeLayersExpr { expr: EdgeFilter! } +""" +Describes the shape of edges between a specific pair of node types — the +property and metadata keys seen on such edges, along with their observed +value types. One `EdgeSchema` per `(srcType, dstType)` pair per layer. +""" type EdgeSchema { """ Returns the type of source for these edges @@ -692,7 +854,16 @@ input EdgeWindowExpr { expr: EdgeFilter! } +""" +A lazy sequence of per-window views of a single edge, produced by +`edge.rolling` / `edge.expanding`. Each entry is the edge as it exists in +that window. +""" type EdgeWindowSet { + """ + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -701,10 +872,32 @@ type EdgeWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Edge!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Edge!]! + """ + Materialise every window as a list. 
Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Edge!]! } +""" +A lazy collection of edges from a graph view. Supports the usual view +transforms (window, layer, filter, ...), plus edge-specific ones like +`explode` and `explodeLayers`, pagination, and sorting. +""" type Edges { """ Returns a collection containing only edges in the default edge layer. @@ -713,19 +906,39 @@ type Edges { """ Returns a collection containing only edges belonging to the listed layers. """ - layers(names: [String!]!): Edges! + layers( + """ + Layer names to include. + """ + names: [String!]! + ): Edges! """ Returns a collection containing edges belonging to all layers except the excluded list of layers. """ - excludeLayers(names: [String!]!): Edges! + excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): Edges! """ Returns a collection containing edges belonging to the specified layer. """ - layer(name: String!): Edges! + layer( + """ + Layer name to include. + """ + name: String! + ): Edges! """ Returns a collection containing edges belonging to all layers except the excluded layer specified. """ - excludeLayer(name: String!): Edges! + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): Edges! """ Creates a WindowSet with the given window duration and optional step using a rolling window. A rolling window is a window that moves forward by step size at each iteration. @@ -737,7 +950,20 @@ type Edges { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): EdgesWindowSet! + rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. 
`{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). + """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): EdgesWindowSet! """ Creates a WindowSet with the given step size using an expanding window. An expanding window is a window that grows by step size at each iteration. @@ -747,20 +973,51 @@ type Edges { If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): EdgesWindowSet! + expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): EdgesWindowSet! """ Creates a view of the Edge including all events between the specified start (inclusive) and end (exclusive). """ - window(start: TimeInput!, end: TimeInput!): Edges! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): Edges! 
""" Creates a view of the Edge including all events at a specified time. """ - at(time: TimeInput!): Edges! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): Edges! + """ + View showing only the latest state of each edge (equivalent to `at(latestTime)`). + """ latest: Edges! """ Creates a view of the Edge including all events that are valid at time. This is equivalent to before(time + 1) for Graph and at(time) for PersistentGraph. """ - snapshotAt(time: TimeInput!): Edges! + snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): Edges! """ Creates a view of the Edge including all events that are valid at the latest time. This is equivalent to a no-op for Graph and latest() for PersistentGraph. """ @@ -768,29 +1025,65 @@ type Edges { """ Creates a view of the Edge including all events before a specified end (exclusive). """ - before(time: TimeInput!): Edges! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): Edges! """ Creates a view of the Edge including all events after a specified start (exclusive). """ - after(time: TimeInput!): Edges! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! + ): Edges! """ Shrinks both the start and end of the window. """ - shrinkWindow(start: TimeInput!, end: TimeInput!): Edges! + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Edges! """ Set the start of the window. """ - shrinkStart(start: TimeInput!): Edges! + shrinkStart( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput! + ): Edges! """ Set the end of the window. """ - shrinkEnd(end: TimeInput!): Edges! + shrinkEnd( + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Edges! 
""" Takes a specified selection of views and applies them in order given. """ - applyViews(views: [EdgesViewCollection!]!): Edges! + applyViews( + """ + Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result. + """ + views: [EdgesViewCollection!]! + ): Edges! """ - Returns an edge object for each update within the original edge. + Expand each edge into one edge per update: if `A->B` has three updates, it + becomes three `A->B` entries each at a distinct timestamp. Use this to + iterate per-event rather than per-edge. """ explode: Edges! """ @@ -800,9 +1093,15 @@ type Edges { """ explodeLayers: Edges! """ - Specify a sort order from: source, destination, property, time. You can also reverse the ordering. + Sort the edges. Multiple criteria are applied lexicographically (ties + on the first key break to the second, etc.). """ - sorted(sortBys: [EdgeSortBy!]!): Edges! + sorted( + """ + Ordered list of sort keys. Each entry chooses exactly one of `src` / `dst` / `time` / `property`, with an optional `reverse: true` to flip order. + """ + sortBys: [EdgeSortBy!]! + ): Edges! """ Returns the start time of the window or none if there is no window. """ @@ -825,19 +1124,73 @@ type Edges { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Edge!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Edge!]! """ Returns a list of all objects in the current selection of the collection. You should filter the collection first then call list. """ list: [Edge!]! 
""" - Returns a filtered view that applies to list down the chain - """ - filter(expr: EdgeFilter!): Edges! + Narrow the collection to edges matching `expr`. The filter sticks to the + returned view — every subsequent traversal through these edges (their + properties, their endpoints' neighbours, etc.) continues to see the + filtered scope. + + Useful when you want one scoping rule to apply across the whole query. + E.g. restricting everything to a specific week: + + ```text + edges { filter(expr: {window: {start: 1234, end: 5678}}) { + list { src { neighbours { list { name } } } } # neighbours still windowed + } } + ``` + + Contrast with `select`, which applies here and is not carried through. """ - Returns filtered list of edges + filter( + """ + Composite edge filter (by property, layer, src/dst, etc.). + """ + expr: EdgeFilter! + ): Edges! """ - select(expr: EdgeFilter!): Edges! + Narrow the collection to edges matching `expr`, but only at this step — + subsequent traversals out of these edges see the unfiltered graph again. + + Useful when you want different scopes at different hops. E.g. Monday's + edges, then the neighbours of their endpoints on Tuesday, then *those* + neighbours on Wednesday: + + ```text + edges { select(expr: {window: {...monday...}}) { + list { src { select(expr: {window: {...tuesday...}}) { + neighbours { select(expr: {window: {...wednesday...}}) { + neighbours { list { name } } + } } + } } } + } } + ``` + + Contrast with `filter`, which persists the scope through subsequent ops. + """ + select( + """ + Composite edge filter (by property, layer, src/dst, etc.). + """ + expr: EdgeFilter! + ): Edges! } input EdgesViewCollection @oneOf { @@ -903,7 +1256,16 @@ input EdgesViewCollection @oneOf { edgeFilter: EdgeFilter } +""" +A lazy sequence of per-window edge collections, produced by +`edges.rolling` / `edges.expanding`. Each entry is an `Edges` collection +as it exists in that window. 
+""" type EdgesWindowSet { + """ + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -912,7 +1274,24 @@ type EdgesWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Edges!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Edges!]! + """ + Materialise every window as a list. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Edges!]! } @@ -951,50 +1330,113 @@ type EventTime { Refer to chrono::format::strftime for formatting specifiers and escape sequences. Raises an error if a time conversion fails. """ - datetime(formatString: String): String + datetime( + """ + Optional format string for the rendered datetime. Uses `%`-style specifiers — for example `%Y-%m-%d` for `2024-01-15`, `%Y-%m-%d %H:%M:%S` for `2024-01-15 10:30:00`, or `%H:%M` for `10:30`. Defaults to RFC 3339 (e.g. `2024-01-15T10:30:45.123+00:00`) when omitted. + """ + formatString: String + ): String } +""" +A view of a Raphtory graph. Every field here returns either data from the +view or a derived view (`window`, `layer`, `at`, `filter`, ...) that you can +keep chaining. Views are cheap — they don't copy the underlying data. +""" type Graph { """ Returns the names of all layers in the graphview. + Distinct layer names observed in the current view — any layer that has at + least one edge event visible here. 
Excludes layers that exist elsewhere in + the graph but whose edges have been filtered out. """ uniqueLayers: [String!]! """ - Returns a view containing only the default layer. + View restricted to the default layer — where nodes and edges end up + when `addNode` / `addEdge` is called without a `layer` argument. + Useful for separating "unlayered" base-graph events from named-layer + ones. """ defaultLayer: Graph! """ - Returns a view containing all the specified layers. - """ - layers(names: [String!]!): Graph! - """ - Returns a view containing all layers except the specified excluded layers. - """ - excludeLayers(names: [String!]!): Graph! - """ - Returns a view containing the layer specified. - """ - layer(name: String!): Graph! - """ - Returns a view containing all layers except the specified excluded layer. - """ - excludeLayer(name: String!): Graph! - """ - Returns a subgraph of a specified set of nodes which contains only the edges that connect nodes of the subgraph to each other. - """ - subgraph(nodes: [String!]!): Graph! - """ - Returns a view of the graph that only includes valid edges. + View restricted to the named layers. Updates on any other layer are hidden; + if that leaves a node or edge with no updates left, it disappears from the + view. + """ + layers( + """ + Layer names to include. + """ + names: [String!]! + ): Graph! + """ + View with the named layers hidden. Updates on those layers are removed; if + that leaves a node or edge with no updates left, it disappears from the + view. + """ + excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): Graph! + """ + View restricted to a single layer. Convenience form of + `layers(names: [name])` — updates on any other layer are hidden, and + entities with nothing left disappear. + """ + layer( + """ + Layer name to include. + """ + name: String! + ): Graph! + """ + View with one layer hidden. 
Convenience form of + `excludeLayers(names: [name])` — updates on that layer are removed, and + entities with nothing left disappear. + """ + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): Graph! + """ + View restricted to a chosen set of nodes and the edges between them. Edges + connecting a selected node to a non-selected node are hidden. + """ + subgraph( + """ + Node ids to keep. + """ + nodes: [NodeId!]! + ): Graph! + """ + View containing only valid edges — for persistent graphs this drops edges + whose most recent event is a deletion at the latest time of the current + view (a later re-addition would keep them). On event graphs this is a + no-op. """ valid: Graph! """ - Returns a subgraph filtered by the specified node types. + View restricted to nodes with the given node types. """ - subgraphNodeTypes(nodeTypes: [String!]!): Graph! + subgraphNodeTypes( + """ + Node types to include. + """ + nodeTypes: [String!]! + ): Graph! """ - Returns a subgraph containing all nodes except the specified excluded nodes. + View with a set of nodes removed (along with any edges touching them). """ - excludeNodes(nodes: [String!]!): Graph! + excludeNodes( + """ + Node ids to exclude. + """ + nodes: [NodeId!]! + ): Graph! """ Creates a rolling window with the specified window size and an optional step. @@ -1006,7 +1448,20 @@ type Graph { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): GraphWindowSet! + rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). 
+ """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): GraphWindowSet! """ Creates an expanding window with the specified step size. @@ -1016,15 +1471,38 @@ type Graph { If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): GraphWindowSet! + expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): GraphWindowSet! """ Return a graph containing only the activity between start and end, by default raphtory stores times in milliseconds from the unix epoch. """ - window(start: TimeInput!, end: TimeInput!): Graph! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): Graph! """ Creates a view including all events at a specified time. """ - at(time: TimeInput!): Graph! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): Graph! 
""" Creates a view including all events at the latest time. """ @@ -1032,7 +1510,12 @@ type Graph { """ Create a view including all events that are valid at the specified time. """ - snapshotAt(time: TimeInput!): Graph! + snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): Graph! """ Create a view including all events that are valid at the latest time. """ @@ -1040,25 +1523,57 @@ type Graph { """ Create a view including all events before a specified end (exclusive). """ - before(time: TimeInput!): Graph! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): Graph! """ Create a view including all events after a specified start (exclusive). """ - after(time: TimeInput!): Graph! - """ - Shrink both the start and end of the window. - """ - shrinkWindow(start: TimeInput!, end: TimeInput!): Graph! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! + ): Graph! + """ + Shrink both the start and end of the window. The new bounds are taken as the + intersection with the current window; this never widens the view. + """ + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if before the current start. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if after the current end. + """ + end: TimeInput! + ): Graph! """ Set the start of the window to the larger of the specified value or current start. """ - shrinkStart(start: TimeInput!): Graph! + shrinkStart( + """ + Proposed new start (TimeInput); has no effect if it would widen the window. + """ + start: TimeInput! + ): Graph! """ Set the end of the window to the smaller of the specified value or current end. """ - shrinkEnd(end: TimeInput!): Graph! + shrinkEnd( + """ + Proposed new end (TimeInput); has no effect if it would widen the window. + """ + end: TimeInput! + ): Graph! """ - Returns the timestamp for the creation of the graph. + Filesystem creation timestamp (epoch millis) of the graph's on-disk folder + — i.e. 
when this graph was first saved to the server, not when its earliest + event occurred. Use `earliestTime` for the latter. """ created: Int! """ @@ -1086,13 +1601,26 @@ type Graph { """ end: EventTime! """ - Returns the earliest time that any edge in this graph is valid. + The earliest time at which any edge in this graph is valid. + + * `includeNegative` — if false, edge events with a timestamp `< 0` are + skipped when computing the minimum. Defaults to true. """ - earliestEdgeTime(includeNegative: Boolean): EventTime! + earliestEdgeTime( + """ + If false, edge events with a timestamp `< 0` are skipped when computing the minimum. Defaults to true. + """ + includeNegative: Boolean + ): EventTime! """ - Returns the latest time that any edge in this graph is valid. + The latest time at which any edge in this graph is valid. """ - latestEdgeTime(includeNegative: Boolean): EventTime! + latestEdgeTime( + """ + If false, edge events with a timestamp `< 0` are skipped when computing the maximum. Defaults to true. + """ + includeNegative: Boolean + ): EventTime! """ Returns the number of edges in the graph. @@ -1111,29 +1639,74 @@ type Graph { """ countNodes: Int! """ - Returns true if the graph contains the specified node. - """ - hasNode(name: String!): Boolean! - """ - Returns true if the graph contains the specified edge. Edges are specified by providing a source and destination node id. You can restrict the search to a specified layer. - """ - hasEdge(src: String!, dst: String!, layer: String): Boolean! - """ - Gets the node with the specified id. - """ - node(name: String!): Node - """ - Gets (optionally a subset of) the nodes in the graph. - """ - nodes(select: NodeFilter): Nodes! - """ - Gets the edge with the specified source and destination nodes. - """ - edge(src: String!, dst: String!): Edge - """ - Gets the edges in the graph. - """ - edges(select: EdgeFilter): Edges! + Returns true if a node with the given id exists in this view. 
+ """ + hasNode( + """ + Node id to look up. + """ + name: NodeId! + ): Boolean! + """ + Returns true if an edge exists between `src` and `dst` in this view, optionally + restricted to a single layer. + """ + hasEdge( + """ + Source node id. + """ + src: NodeId!, + """ + Destination node id. + """ + dst: NodeId!, + """ + Optional; if provided, only checks whether the edge exists on this layer. If null or omitted, any layer counts. + """ + layer: String + ): Boolean! + """ + Look up a single node by id. Returns null if the node doesn't exist in this + view. + """ + node( + """ + Node id. + """ + name: NodeId! + ): Node + """ + All nodes in this view, optionally narrowed by a filter. + """ + nodes( + """ + Optional node filter (by name, property, type, etc.). If omitted, every node in the view is returned. + """ + select: NodeFilter + ): Nodes! + """ + Look up a single edge by its endpoint ids. Returns null if no edge exists + between `src` and `dst` in this view. + """ + edge( + """ + Source node id. + """ + src: NodeId!, + """ + Destination node id. + """ + dst: NodeId! + ): Edge + """ + All edges in this view, optionally narrowed by a filter. + """ + edges( + """ + Optional edge filter (by property, layer, src/dst, etc.). If omitted, every edge in the view is returned. + """ + select: EdgeFilter + ): Edges! """ Returns the properties of the graph. """ @@ -1158,36 +1731,116 @@ type Graph { Returns the graph schema. """ schema: GraphSchema! - algorithms: GraphAlgorithmPlugin! - sharedNeighbours(selectedNodes: [String!]!): [Node!]! """ - Export all nodes and edges from this graph view to another existing graph + Access registered graph algorithms (PageRank, shortest path, etc.) for this + graph view. The set of available algorithms is defined by the plugin registry + loaded at server startup. """ - exportTo(path: String!): Boolean! - filter(expr: GraphFilter): Graph! - filterNodes(expr: NodeFilter!): Graph! - filterEdges(expr: EdgeFilter!): Graph! 
+ algorithms: GraphAlgorithmPlugin! + """ + Nodes that are neighbours of every node in `selectedNodes`. Returns the + intersection of each selected node's neighbour set (undirected). + """ + sharedNeighbours( + """ + Node ids whose common neighbours you want. Returns an empty list if `selectedNodes` is empty or any id does not exist. + """ + selectedNodes: [NodeId!]! + ): [Node!]! + """ + Copy all nodes and edges of the current graph view into another already- + existing graph stored on the server. The destination graph is preserved + — this only adds; it does not replace. + """ + exportTo( + """ + Destination graph path relative to the root namespace. + """ + path: String! + ): Boolean! + """ + Returns a filtered view of the graph. Applies a mixed node/edge filter + expression and narrows nodes, edges, and their properties to what matches. + """ + filter( + """ + Optional composite filter combining node, edge, property, and metadata conditions. If omitted, applies the identity filter (equivalent to no filtering). + """ + expr: GraphFilter + ): Graph! + """ + Returns a graph view restricted to nodes that match the given filter; edges + are kept only if both endpoints survive. + """ + filterNodes( + """ + Composite node filter (by name, property, type, etc.). + """ + expr: NodeFilter! + ): Graph! + """ + Returns a graph view restricted to edges that match the given filter. Nodes + remain in the view even if all their edges are filtered out. + """ + filterEdges( + """ + Composite edge filter (by property, layer, src/dst, etc.). + """ + expr: EdgeFilter! + ): Graph! """ (Experimental) Get index specification. """ getIndexSpec: IndexSpec! """ - (Experimental) Searches for nodes which match the given filter expression. - - Uses Tantivy's exact search. - """ - searchNodes(filter: NodeFilter!, limit: Int!, offset: Int!): [Node!]! - """ - (Experimental) Searches the index for edges which match the given filter expression. - - Uses Tantivy's exact search. 
- """ - searchEdges(filter: EdgeFilter!, limit: Int!, offset: Int!): [Edge!]! - """ - Returns the specified graph view or if none is specified returns the default view. - This allows you to specify multiple operations together. - """ - applyViews(views: [GraphViewCollection!]!): Graph! + (Experimental) Searches for nodes which match the given filter + expression. Uses Tantivy's exact search; requires the graph to have + been indexed. + """ + searchNodes( + """ + Composite node filter (by name, property, type, etc.). + """ + filter: NodeFilter!, + """ + Maximum number of nodes to return. + """ + limit: Int!, + """ + Number of matches to skip before returning results. + """ + offset: Int! + ): [Node!]! + """ + (Experimental) Searches the index for edges which match the given + filter expression. Uses Tantivy's exact search; requires the graph to + have been indexed. + """ + searchEdges( + """ + Composite edge filter (by property, layer, src/dst, etc.). + """ + filter: EdgeFilter!, + """ + Maximum number of edges to return. + """ + limit: Int!, + """ + Number of matches to skip before returning results. + """ + offset: Int! + ): [Edge!]! + """ + Apply a list of view operations in the given order and return the + resulting graph view. Lets callers compose multiple view transforms + (window, layer, filter, snapshot, ...) in a single call. + """ + applyViews( + """ + Ordered list of view operations; each entry is a one-of variant applied to the running result. + """ + views: [GraphViewCollection!]! + ): Graph! } type GraphAlgorithmPlugin { @@ -1327,7 +1980,7 @@ input GraphViewCollection @oneOf { """ Subgraph nodes. """ - subgraph: [String!] + subgraph: [NodeId!] """ Subgraph node types. """ @@ -1335,7 +1988,7 @@ input GraphViewCollection @oneOf { """ List of excluded nodes. """ - excludeNodes: [String!] + excludeNodes: [NodeId!] """ Valid state. 
""" @@ -1418,9 +2071,16 @@ input GraphWindowExpr { expr: GraphFilter } +""" +A lazy sequence of graph snapshots produced by `rolling` or `expanding`. +Each entry is a `Graph` at a different window over time. Iterate via +`list` / `page` (or count with `count`). Subsequent view ops apply +per-window. +""" type GraphWindowSet { """ - Returns the number of items. + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. """ count: Int! """ @@ -1430,7 +2090,24 @@ type GraphWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Graph!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Graph!]! + """ + Materialise every window as a list. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Graph!]! } @@ -1462,7 +2139,20 @@ type History { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [EventTime!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [EventTime!]! """ Fetch one page of EventTime entries with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). 
@@ -1470,7 +2160,20 @@ type History { For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - pageRev(limit: Int!, offset: Int, pageIndex: Int): [EventTime!]! + pageRev( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [EventTime!]! """ Returns True if the history is empty. """ @@ -1490,14 +2193,22 @@ type History { Optionally, a format string can be passed to format the output. Defaults to RFC 3339 if not provided (e.g., "2023-12-25T10:30:45.123Z"). Refer to chrono::format::strftime for formatting specifiers and escape sequences. """ - datetimes(formatString: String): HistoryDateTime! + datetimes( + """ + Optional format string for the rendered datetime. Uses `%`-style specifiers — for example `%Y-%m-%d` for `2024-01-15`, `%Y-%m-%d %H:%M:%S` for `2024-01-15 10:30:00`, or `%H:%M` for `10:30`. Defaults to RFC 3339 (e.g. `2024-01-15T10:30:45.123+00:00`) when omitted. + """ + formatString: String + ): HistoryDateTime! """ Returns a HistoryEventId object which accesses event ids of EventTime entries. They are used for ordering within the same timestamp. """ eventId: HistoryEventId! """ - Returns an Intervals object which calculates the intervals between consecutive EventTime timestamps. + Inter-event gap analysis for this history. The returned `Intervals` + object exposes each gap (in milliseconds) between consecutive events, + plus summary statistics — `min` / `max` / `mean` / `median` — and + paginated access via `list` / `listRev` / `page` / `pageRev`. """ intervals: Intervals! } @@ -1511,13 +2222,23 @@ type HistoryDateTime { If filter_broken is set to True, time conversion errors will be ignored. 
If set to False, a TimeError will be raised on time conversion error. Defaults to False. """ - list(filterBroken: Boolean): [String!]! + list( + """ + If true, ignore unconvertible timestamps; if false, raise an error on the first conversion failure. Defaults to false. + """ + filterBroken: Boolean + ): [String!]! """ List all datetimes formatted as strings in reverse chronological order. If filter_broken is set to True, time conversion errors will be ignored. If set to False, a TimeError will be raised on time conversion error. Defaults to False. """ - listRev(filterBroken: Boolean): [String!]! + listRev( + """ + If true, ignore unconvertible timestamps; if false, raise an error on the first conversion failure. Defaults to false. + """ + filterBroken: Boolean + ): [String!]! """ Fetch one page of datetimes formatted as string with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). @@ -1527,7 +2248,24 @@ type HistoryDateTime { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int, filterBroken: Boolean): [String!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int, + """ + If true, skip timestamps whose conversion fails; if false, raise an error on the first conversion failure. Defaults to false. + """ + filterBroken: Boolean + ): [String!]! """ Fetch one page of datetimes formatted as string in reverse chronological order with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). 
@@ -1537,7 +2275,24 @@ type HistoryDateTime { For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - pageRev(limit: Int!, offset: Int, pageIndex: Int, filterBroken: Boolean): [String!]! + pageRev( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int, + """ + If true, skip timestamps whose conversion fails; if false, raise an error on the first conversion failure. Defaults to false. + """ + filterBroken: Boolean + ): [String!]! } """ @@ -1559,7 +2314,20 @@ type HistoryEventId { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Int!]! """ Fetch one page of event ids in reverse chronological order with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). @@ -1567,7 +2335,20 @@ type HistoryEventId { For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - pageRev(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + pageRev( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). 
+ """ + pageIndex: Int + ): [Int!]! } """ @@ -1589,7 +2370,20 @@ type HistoryTimestamp { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Int!]! """ Fetch one page of timestamps in reverse order with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). @@ -1597,7 +2391,20 @@ type HistoryTimestamp { For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - pageRev(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + pageRev( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Int!]! } type IndexSpec { @@ -1632,13 +2439,13 @@ input IndexSpecInput { input InputEdge { """ - Source node. + Source node id (string or non-negative integer). """ - src: String! + src: NodeId! """ - Destination node. + Destination node id (string or non-negative integer). """ - dst: String! + dst: NodeId! } """ @@ -1660,7 +2467,20 @@ type Intervals { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + page( + """ + Maximum number of items to return on this page. 
+ """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Int!]! """ Fetch one page of intervals between consecutive timestamps in reverse order with a number of items up to a specified limit, optionally offset by a specified amount. The page_index sets the number of pages to skip (defaults to 0). @@ -1668,7 +2488,20 @@ type Intervals { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - pageRev(limit: Int!, offset: Int, pageIndex: Int): [Int!]! + pageRev( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Int!]! """ Compute the mean interval between consecutive timestamps. Returns None if fewer than 1 timestamp. """ @@ -1687,6 +2520,10 @@ type Intervals { min: Int } +""" +Describes the shape of a single edge layer — its name and the per +`(srcType, dstType)` edge schemas observed within it. +""" type LayerSchema { """ Returns the name of the layer with this schema @@ -1698,6 +2535,12 @@ type LayerSchema { edges: [EdgeSchema!]! } +""" +Lightweight summary of a stored graph — its name, path, counts, and +filesystem timestamps — served without deserializing the full graph. +Useful for listing what's available on the server before committing to a +full load. +""" type MetaGraph { """ Returns the graph name. @@ -1736,23 +2579,45 @@ type MetaGraph { metadata: [Property!]! } +""" +Constant key/value metadata attached to an entity (node, edge, or graph). +Metadata has no timeline — each key maps to exactly one value for the +lifetime of the entity. 
Separate from `Properties`, which carries +time-varying data. +""" type Metadata { """ - Get metadata value matching the specified key. + Look up a single metadata value by key. Returns null if no metadata with that + key exists. """ - get(key: String!): Property + get( + """ + The metadata name. + """ + key: String! + ): Property """ - /// Check if the key is in the metadata. + Returns true if a metadata entry with the given key exists. """ - contains(key: String!): Boolean! + contains( + """ + The metadata name to look up. + """ + key: String! + ): Boolean! """ - Return all metadata keys. + All metadata keys present on this entity. """ keys: [String!]! """ - /// Return all metadata values. + All metadata values as `{key, value}` entries. """ - values(keys: [String!]): [Property!]! + values( + """ + Optional whitelist. If provided, only metadata with these keys is returned; if omitted, every metadata entry is returned. + """ + keys: [String!] + ): [Property!]! } type MutRoot { @@ -1761,48 +2626,120 @@ type MutRoot { """ plugins: MutationPlugin! """ - Delete graph from a path on the server. - """ - deleteGraph(path: String!): Boolean! - """ - Creates a new graph. - """ - newGraph(path: String!, graphType: GraphType!): Boolean! - """ - Move graph from a path on the server to a new_path on the server. - """ - moveGraph(path: String!, newPath: String!, overwrite: Boolean): Boolean! - """ - Copy graph from a path on the server to a new_path on the server. - """ - copyGraph(path: String!, newPath: String!, overwrite: Boolean): Boolean! - """ - Upload a graph file from a path on the client using GQL multipart uploading. - - Returns:: - name of the new graph - """ - uploadGraph(path: String!, graph: Upload!, overwrite: Boolean!): String! - """ - Send graph bincode as base64 encoded string. - - Returns:: - path of the new graph - """ - sendGraph(path: String!, graph: String!, overwrite: Boolean!): String! 
- """ - Returns a subgraph given a set of nodes from an existing graph in the server. - - Returns:: - name of the new graph - """ - createSubgraph(parentPath: String!, nodes: [String!]!, newPath: String!, overwrite: Boolean!): String! - """ - (Experimental) Creates search index. - """ - createIndex(path: String!, indexSpec: IndexSpecInput, inRam: Boolean!): Boolean! + Permanently delete a stored graph from the server. Requires write + permission on both the graph and its parent namespace. + """ + deleteGraph( + """ + Graph path relative to the root namespace. + """ + path: String! + ): Boolean! + """ + Create a new empty graph at the given path. Errors if a graph already + exists there. + """ + newGraph( + """ + Destination path relative to the root namespace. + """ + path: String!, graphType: GraphType! + ): Boolean! + """ + Move a stored graph to a new path on the server (rename / relocate). + Atomic: copies first, then deletes the source. + """ + moveGraph( + """ + Current graph path relative to the root namespace. + """ + path: String!, newPath: String!, + """ + If true, allow replacing an existing graph at `newPath`; defaults to false. + """ + overwrite: Boolean + ): Boolean! + """ + Duplicate a stored graph to a new path on the server. Source is + preserved. + """ + copyGraph( + """ + Source graph path relative to the root namespace. + """ + path: String!, newPath: String!, + """ + If true, allow replacing an existing graph at `newPath`; defaults to false. + """ + overwrite: Boolean + ): Boolean! + """ + Stream-upload a graph file using GraphQL multipart upload. The client + sends the file directly; the server stores it under `path`. + """ + uploadGraph( + """ + Destination path relative to the root namespace. + """ + path: String!, + """ + Multipart upload of the serialised graph file. + """ + graph: Upload!, + """ + If true, replace any graph already at `path`. + """ + overwrite: Boolean! + ): String! 
+ """ + Send a serialised graph as a base64-encoded string in the request + body. Use for smaller graphs where multipart upload is overkill. + """ + sendGraph( + """ + Destination path relative to the root namespace. + """ + path: String!, + """ + Base64-encoded bincode of the serialised graph. + """ + graph: String!, + """ + If true, replace any graph already at `path`. + """ + overwrite: Boolean! + ): String! + """ + Persist a subgraph of an existing stored graph as a new graph. The + subgraph contains only the listed nodes and edges between them. + """ + createSubgraph( parentPath: String!, + """ + Node ids to include in the subgraph. + """ + nodes: [NodeId!]!, newPath: String!, + """ + If true, replace any graph already at `newPath`. + """ + overwrite: Boolean! + ): String! + """ + (Experimental) Build a Tantivy search index for a stored graph so it + can be queried via `searchNodes` / `searchEdges`. + """ + createIndex( + """ + Graph path relative to the root namespace. + """ + path: String!, indexSpec: IndexSpecInput, inRam: Boolean! + ): Boolean! } +""" +Write-side handle for a single edge — returned from `addEdge` or +`MutableGraph.edge`. Supports adding updates, deletions, and attaching +or updating metadata. +""" type MutableEdge { """ Use to check if adding the edge was successful. @@ -1821,83 +2758,275 @@ type MutableEdge { """ dst: MutableNode! """ - Mark the edge as deleted at time time. - """ - delete(time: Int!, layer: String): Boolean! - """ - Add metadata to the edge (errors if the value already exists). - - If this is called after add_edge, the layer is inherited from the add_edge and does not - need to be specified again. - """ - addMetadata(properties: [PropertyInput!]!, layer: String): Boolean! - """ - Update metadata of the edge (existing values are overwritten). - - If this is called after add_edge, the layer is inherited from the add_edge and does not - need to be specified again. 
- """ - updateMetadata(properties: [PropertyInput!]!, layer: String): Boolean! - """ - Add temporal property updates to the edge. - - If this is called after add_edge, the layer is inherited from the add_edge and does not - need to be specified again. - """ - addUpdates(time: Int!, properties: [PropertyInput!], layer: String): Boolean! + Mark this edge as deleted at the given time. Persistent graphs treat this + as a tombstone (the edge becomes invalid from `time` onwards); event + graphs simply log the deletion event. + """ + delete( + """ + Time of the deletion. + """ + time: TimeInput!, + """ + Optional layer name. If omitted, uses the layer the edge was originally added on (when called after `addEdge`). + """ + layer: String + ): Boolean! + """ + Add metadata to this edge. Errors if any of the keys already exists — + use `updateMetadata` to overwrite. If this is called after `addEdge`, + the layer is inherited and does not need to be specified again. + """ + addMetadata( + """ + List of `{key, value}` pairs to set as metadata. + """ + properties: [PropertyInput!]!, + """ + Optional layer name; defaults to the inherited layer. + """ + layer: String + ): Boolean! + """ + Update metadata of this edge, overwriting any existing values for the + given keys. If this is called after `addEdge`, the layer is inherited + and does not need to be specified again. + """ + updateMetadata( + """ + List of `{key, value}` pairs to upsert. + """ + properties: [PropertyInput!]!, + """ + Optional layer name; defaults to the inherited layer. + """ + layer: String + ): Boolean! + """ + Append a property update to this edge at a specific time. If called + after `addEdge`, the layer is inherited and does not need to be + specified again. + """ + addUpdates( + """ + Time of the update. + """ + time: TimeInput!, + """ + Optional `{key, value}` pairs attached to the event. + """ + properties: [PropertyInput!], + """ + Optional layer name; defaults to the inherited layer. 
+ """ + layer: String + ): Boolean! } +""" +Write-side handle for a graph — returned from mutations like `newGraph` or +`updateGraph`. Supports adding nodes and edges (individually or in batches), +attaching properties/metadata, and looking up mutable node/edge handles +(`node`, `edge`). Read `graph` to get a matching read-only view. +""" type MutableGraph { """ - Get the non-mutable graph. + Read-only view of this graph — identical to what you'd get from + `graph(path:)` on the query root. Use this when you want to compose + queries on the graph you've just mutated. """ graph: Graph! """ - Get mutable existing node. - """ - node(name: String!): MutableNode - """ - Add a new node or add updates to an existing node. - """ - addNode(time: Int!, name: String!, properties: [PropertyInput!], nodeType: String, layer: String): MutableNode! - """ - Create a new node or fail if it already exists. - """ - createNode(time: Int!, name: String!, properties: [PropertyInput!], nodeType: String, layer: String): MutableNode! - """ - Add a batch of nodes. - """ - addNodes(nodes: [NodeAddition!]!): Boolean! - """ - Get a mutable existing edge. - """ - edge(src: String!, dst: String!): MutableEdge - """ - Add a new edge or add updates to an existing edge. - """ - addEdge(time: Int!, src: String!, dst: String!, properties: [PropertyInput!], layer: String): MutableEdge! - """ - Add a batch of edges. - """ - addEdges(edges: [EdgeAddition!]!): Boolean! - """ - Mark an edge as deleted (creates the edge if it did not exist). - """ - deleteEdge(time: Int!, src: String!, dst: String!, layer: String): MutableEdge! - """ - Add temporal properties to graph. - """ - addProperties(t: Int!, properties: [PropertyInput!]!): Boolean! - """ - Add metadata to graph (errors if the property already exists). - """ - addMetadata(properties: [PropertyInput!]!): Boolean! - """ - Update metadata of the graph (overwrites existing values). - """ - updateMetadata(properties: [PropertyInput!]!): Boolean! 
+ Look up an existing node for mutation. Returns null if the node doesn't + exist; use `addNode` or `createNode` to create one. + """ + node( + """ + Node id. + """ + name: NodeId! + ): MutableNode + """ + Add a new node or append an update to an existing one. Upsert semantics: + no error if the node already exists — properties and type are merged. + """ + addNode( + """ + Time of the event. + """ + time: TimeInput!, + """ + Node id. + """ + name: NodeId!, + """ + Optional property updates attached to this event. + """ + properties: [PropertyInput!], + """ + Optional node type to assign. If provided, sets the node's type at this event. + """ + nodeType: String, + """ + Optional layer name. If omitted, the default layer is used. + """ + layer: String + ): MutableNode! + """ + Create a new node or fail if it already exists. Strict alternative to + `addNode` — use this when you want to detect collisions. + """ + createNode( + """ + Time of the create event. + """ + time: TimeInput!, + """ + Node id. + """ + name: NodeId!, + """ + Optional property updates attached to this event. + """ + properties: [PropertyInput!], + """ + Optional node type to assign. If provided, sets the node's type at this event. + """ + nodeType: String, + """ + Optional layer name. If omitted, the default layer is used. + """ + layer: String + ): MutableNode! + """ + Batch-add multiple nodes in one call. For each `NodeAddition`, applies every + update it carries (time/properties pairs), then optionally sets its node type + and adds any metadata. On partial failure, returns a `BatchFailures` error + describing which entries failed and why; otherwise returns true. + """ + addNodes( + """ + List of `NodeAddition` inputs, each specifying a node's name, optional type, layer, per-timestamp updates, and metadata. + """ + nodes: [NodeAddition!]! + ): Boolean! + """ + Look up an existing edge for mutation. Returns null if no such edge exists. + """ + edge( + """ + Source node id. 
+ """ + src: NodeId!, + """ + Destination node id. + """ + dst: NodeId! + ): MutableEdge + """ + Add a new edge or append an update to an existing one. Upsert semantics: + safe to call on an edge that already exists — creates missing endpoints if + needed. + """ + addEdge( + """ + Time of the event. + """ + time: TimeInput!, + """ + Source node id. + """ + src: NodeId!, + """ + Destination node id. + """ + dst: NodeId!, + """ + Optional property updates attached to this event. + """ + properties: [PropertyInput!], + """ + Optional layer name. If omitted, the default layer is used. + """ + layer: String + ): MutableEdge! + """ + Batch-add multiple edges in one call. For each `EdgeAddition`, applies every + update it carries, then adds any metadata. On partial failure, returns a + `BatchFailures` error describing which entries failed; otherwise returns + true. + """ + addEdges( + """ + List of `EdgeAddition` inputs, each specifying an edge's `src`, `dst`, optional layer, per-timestamp updates, and metadata. + """ + edges: [EdgeAddition!]! + ): Boolean! + """ + Mark an edge as deleted at the given time. Persistent graphs treat this + as a tombstone (the edge becomes invalid from `time` onwards); event + graphs simply log the deletion event. Creates the edge first if it did + not exist. + """ + deleteEdge( + """ + Time of the deletion. + """ + time: TimeInput!, + """ + Source node id. + """ + src: NodeId!, + """ + Destination node id. + """ + dst: NodeId!, + """ + Optional layer name. If omitted, the default layer is used. + """ + layer: String + ): MutableEdge! + """ + Add temporal properties to the graph itself (not a node or edge). Each + call records a property update at `t`. + """ + addProperties( + """ + Time of the update. + """ + t: TimeInput!, + """ + List of `{key, value}` pairs to set. + """ + properties: [PropertyInput!]! + ): Boolean! + """ + Add metadata to the graph itself. Errors if any of the keys already + exists — use `updateMetadata` to overwrite. 
+ """ + addMetadata( + """ + List of `{key, value}` pairs to set as metadata. + """ + properties: [PropertyInput!]! + ): Boolean! + """ + Update metadata of the graph itself, overwriting any existing values for + the given keys. + """ + updateMetadata( + """ + List of `{key, value}` pairs to upsert. + """ + properties: [PropertyInput!]! + ): Boolean! } +""" +Write-side handle for a single node — returned from `addNode`, `createNode`, +or `MutableGraph.node`. Supports adding updates, setting node type, and +attaching or updating metadata. +""" type MutableNode { """ Use to check if adding the node was successful. @@ -1908,32 +3037,90 @@ type MutableNode { """ node: Node! """ - Add metadata to the node (errors if the property already exists). - """ - addMetadata(properties: [PropertyInput!]!): Boolean! - """ - Set the node type (errors if the node already has a non-default type). - """ - setNodeType(newType: String!): Boolean! - """ - Update metadata of the node (overwrites existing property values). - """ - updateMetadata(properties: [PropertyInput!]!): Boolean! - """ - Add temporal property updates to the node. - """ - addUpdates(time: Int!, properties: [PropertyInput!], layer: String): Boolean! + Add metadata to this node. Errors if any of the keys already exists — + use `updateMetadata` to overwrite. + """ + addMetadata( + """ + List of `{key, value}` pairs to set as metadata. + """ + properties: [PropertyInput!]! + ): Boolean! + """ + Set this node's type. Errors if the node already has a non-default + type and you're trying to change it. + """ + setNodeType( + """ + Node-type name to assign. + """ + newType: String! + ): Boolean! + """ + Update metadata of this node, overwriting any existing values for the + given keys. + """ + updateMetadata( + """ + List of `{key, value}` pairs to upsert. + """ + properties: [PropertyInput!]! + ): Boolean! + """ + Append a property update to this node at a specific time. + """ + addUpdates( + """ + Time of the update. 
+ """ + time: TimeInput!, + """ + Optional `{key, value}` pairs attached to the event. + """ + properties: [PropertyInput!], + """ + Optional layer name. If omitted, the default layer is used. + """ + layer: String + ): Boolean! } type MutationPlugin { NoOps: String! } +""" +A directory-like container for graphs and nested namespaces. Graphs are +addressed by path (e.g. `"team/project/graph"`), and every segment except +the last is a namespace. Use to browse what's stored on the server without +loading any graph data. +""" type Namespace { + """ + Graphs directly inside this namespace (excludes graphs in nested + namespaces). Filtered by the caller's permissions — only graphs the + caller is allowed to see are returned. + """ graphs: CollectionOfMetaGraph! + """ + Path of this namespace relative to the root namespace. Empty string for + the root namespace itself. + """ path: String! + """ + Parent namespace, or null at the root. + """ parent: Namespace + """ + Sub-namespaces directly inside this one (one level down, not recursive). + Filtered by permissions. + """ children: CollectionOfNamespace! + """ + Everything in this namespace — sub-namespaces and graphs — as a single + heterogeneous collection. Sub-namespaces are listed before graphs. + Filtered by permissions. + """ items: CollectionOfNamespacedItem! } @@ -1944,9 +3131,10 @@ Raphtory graph node. """ type Node { """ - Returns the unique id of the node. + Returns the unique id of the node — `String` for string-indexed + graphs, non-negative `Int` for integer-indexed graphs. """ - id: String! + id: NodeId! """ Returns the name of the node. """ @@ -1958,19 +3146,39 @@ type Node { """ Return a view of node containing all layers specified. """ - layers(names: [String!]!): Node! + layers( + """ + Layer names to include. + """ + names: [String!]! + ): Node! """ Returns a collection containing nodes belonging to all layers except the excluded list of layers. """ - excludeLayers(names: [String!]!): Node! 
+ excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): Node! """ Returns a collection containing nodes belonging to the specified layer. """ - layer(name: String!): Node! + layer( + """ + Layer name to include. + """ + name: String! + ): Node! """ Returns a collection containing nodes belonging to all layers except the excluded layer. """ - excludeLayer(name: String!): Node! + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): Node! """ Creates a WindowSet with the specified window size and optional step using a rolling window. @@ -1982,7 +3190,20 @@ type Node { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): NodeWindowSet! + rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). + """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): NodeWindowSet! """ Creates a WindowSet with the specified step size using an expanding window. @@ -1992,15 +3213,38 @@ type Node { If unspecified (i.e. 
by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): NodeWindowSet! + expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): NodeWindowSet! """ Create a view of the node including all events between the specified start (inclusive) and end (exclusive). """ - window(start: TimeInput!, end: TimeInput!): Node! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): Node! """ Create a view of the node including all events at a specified time. """ - at(time: TimeInput!): Node! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): Node! """ Create a view of the node including all events at the latest time. """ @@ -2008,7 +3252,12 @@ type Node { """ Create a view of the node including all events that are valid at the specified time. """ - snapshotAt(time: TimeInput!): Node! + snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): Node! """ Create a view of the node including all events that are valid at the latest time. """ @@ -2016,23 +3265,52 @@ type Node { """ Create a view of the node including all events before specified end time (exclusive). """ - before(time: TimeInput!): Node! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): Node! """ Create a view of the node including all events after the specified start time (exclusive). 
""" - after(time: TimeInput!): Node! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! + ): Node! """ Shrink a Window to a specified start and end time, if these are earlier and later than the current start and end respectively. """ - shrinkWindow(start: TimeInput!, end: TimeInput!): Node! + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Node! """ Set the start of the window to the larger of a specified start time and self.start(). """ - shrinkStart(start: TimeInput!): Node! + shrinkStart( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput! + ): Node! """ Set the end of the window to the smaller of a specified end and self.end(). """ - shrinkEnd(end: TimeInput!): Node! + shrinkEnd( + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Node! applyViews(views: [NodeViewCollection!]!): Node! """ Returns the earliest time that the node exists. @@ -2125,9 +3403,9 @@ type Node { input NodeAddition { """ - Name. + Node id (string or non-negative integer). """ - name: String! + name: NodeId! """ Node type. """ @@ -2338,6 +3616,13 @@ input NodeFilter @oneOf { isActive: Boolean } +""" +Identifier for a node — either a string (`"alice"`) or a non-negative +integer (`42`). Use whichever form matches how the graph was indexed +when nodes were added. +""" +scalar NodeId + """ Restricts node evaluation to one or more layers and applies a nested `NodeFilter`. @@ -2354,12 +3639,27 @@ input NodeLayersExpr { expr: NodeFilter! } +""" +Describes the shape of nodes of a specific type in a graph — its property +keys and observed value types (and, for string-valued properties, the set +of distinct values seen). One `NodeSchema` per node type. 
+""" type NodeSchema { + """ + The node type this schema describes (e.g. `"person"`, `"org"`). + Falls back to the default node type for untyped nodes. + """ typeName: String! """ - Returns the list of property schemas for this node + Property schemas seen on nodes of this type — one entry per property key + ever set on a node of this type, with its observed `PropertyType` and (for + string-valued properties) the set of distinct values. """ properties: [PropertySchema!]! + """ + Metadata schemas seen on nodes of this type — same shape as `properties` + but covering the metadata surface (constant, not temporal). + """ metadata: [PropertySchema!]! } @@ -2495,7 +3795,16 @@ input NodeWindowExpr { expr: NodeFilter! } +""" +A lazy sequence of per-window views of a single node, produced by +`node.rolling` / `node.expanding`. Each entry is the node as it exists in +that window. +""" type NodeWindowSet { + """ + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -2504,10 +3813,32 @@ type NodeWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Node!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Node!]! + """ + Materialise every window as a list. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Node!]! } +""" +A lazy collection of nodes from a graph view. 
Supports all the same view +transforms as `Graph` (window, layer, filter, ...) plus pagination and +sorting. Iterated via `list` / `page` / `ids` / `count`. +""" type Nodes { """ Return a view of the nodes containing only the default edge layer. @@ -2516,19 +3847,39 @@ type Nodes { """ Return a view of the nodes containing all layers specified. """ - layers(names: [String!]!): Nodes! + layers( + """ + Layer names to include. + """ + names: [String!]! + ): Nodes! """ Return a view of the nodes containing all layers except those specified. """ - excludeLayers(names: [String!]!): Nodes! + excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): Nodes! """ Return a view of the nodes containing the specified layer. """ - layer(name: String!): Nodes! + layer( + """ + Layer name to include. + """ + name: String! + ): Nodes! """ Return a view of the nodes containing all layers except those specified. """ - excludeLayer(name: String!): Nodes! + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): Nodes! """ Creates a WindowSet with the specified window size and optional step using a rolling window. @@ -2540,7 +3891,20 @@ type Nodes { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): NodesWindowSet! + rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). + """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. 
windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): NodesWindowSet! """ Creates a WindowSet with the specified step size using an expanding window. @@ -2550,15 +3914,38 @@ type Nodes { If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): NodesWindowSet! + expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): NodesWindowSet! """ Create a view of the node including all events between the specified start (inclusive) and end (exclusive). """ - window(start: TimeInput!, end: TimeInput!): Nodes! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): Nodes! """ Create a view of the nodes including all events at a specified time. """ - at(time: TimeInput!): Nodes! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): Nodes! """ Create a view of the nodes including all events at the latest time. """ @@ -2566,7 +3953,12 @@ type Nodes { """ Create a view of the nodes including all events that are valid at the specified time. """ - snapshotAt(time: TimeInput!): Nodes! 
+ snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): Nodes! """ Create a view of the nodes including all events that are valid at the latest time. """ @@ -2574,29 +3966,82 @@ type Nodes { """ Create a view of the nodes including all events before specified end time (exclusive). """ - before(time: TimeInput!): Nodes! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): Nodes! """ Create a view of the nodes including all events after the specified start time (exclusive). """ - after(time: TimeInput!): Nodes! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! + ): Nodes! """ Shrink both the start and end of the window. """ - shrinkWindow(start: TimeInput!, end: TimeInput!): Nodes! + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Nodes! """ Set the start of the window to the larger of a specified start time and self.start(). """ - shrinkStart(start: TimeInput!): Nodes! + shrinkStart( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput! + ): Nodes! """ Set the end of the window to the smaller of a specified end and self.end(). """ - shrinkEnd(end: TimeInput!): Nodes! + shrinkEnd( + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): Nodes! """ Filter nodes by node type. """ - typeFilter(nodeTypes: [String!]!): Nodes! - applyViews(views: [NodesViewCollection!]!): Nodes! - sorted(sortBys: [NodeSortBy!]!): Nodes! + typeFilter( + """ + Node-type names to keep. + """ + nodeTypes: [String!]! + ): Nodes! + """ + Apply a list of views in the given order and return the resulting nodes + collection. Lets callers compose window, layer, filter, and snapshot + operations in a single call. 
+ """ + applyViews( + """ + Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, etc.) applied to the running result. + """ + views: [NodesViewCollection!]! + ): Nodes! + """ + Sort the nodes. Multiple criteria are applied lexicographically (ties on the + first key break to the second, etc.). + """ + sorted( + """ + Ordered list of sort keys. Each entry chooses exactly one of `id` / `time` / `property`, with an optional `reverse: true` to flip order. + """ + sortBys: [NodeSortBy!]! + ): Nodes! """ Returns the start time of the window. Errors if there is no window. """ @@ -2605,6 +4050,9 @@ type Nodes { Returns the end time of the window. Errors if there is no window. """ end: EventTime! + """ + Number of nodes in the current view. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -2613,20 +4061,78 @@ type Nodes { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Node!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Node!]! + """ + Materialise every node in the view. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Node!]! """ - Returns a view of the node ids. + Every node's id (name) as a flat list of strings. Rejected by the server when + bulk list endpoints are disabled. """ ids: [String!]! """ - Returns a filtered view that applies to list down the chain - """ - filter(expr: NodeFilter!): Nodes! + Narrow the collection to nodes matching `expr`. 
The filter sticks to the + returned view — every subsequent traversal through these nodes (their + neighbours, edges, properties) continues to see the filtered scope. + + Useful when you want one scoping rule to apply across the whole query. + E.g. restricting everything to a specific week: + + ```text + nodes { filter(expr: {window: {start: 1234, end: 5678}}) { + list { neighbours { list { name } } } # neighbours still windowed + } } + ``` + + Contrast with `select`, which applies here and is not carried through. """ - Returns filtered list of nodes + filter( + """ + Composite node filter (by name, property, type, etc.). + """ + expr: NodeFilter! + ): Nodes! """ - select(expr: NodeFilter!): Nodes! + Narrow the collection to nodes matching `expr`, but only at this step — + subsequent traversals out of these nodes see the unfiltered graph again. + + Useful when you want different scopes at different hops. E.g. nodes + active on Monday, then their neighbours active on Tuesday, then *those* + neighbours active on Wednesday: + + ```text + nodes { select(expr: {window: {...monday...}}) { + list { neighbours { select(expr: {window: {...tuesday...}}) { + list { neighbours { select(expr: {window: {...wednesday...}}) { + list { name } + } } } + } } } + } } + ``` + + Contrast with `filter`, which persists the scope through subsequent ops. + """ + select( + """ + Composite node filter (by name, property, type, etc.). + """ + expr: NodeFilter! + ): Nodes! } input NodesViewCollection @oneOf { @@ -2696,7 +4202,16 @@ input NodesViewCollection @oneOf { typeFilter: [String!] } +""" +A lazy sequence of per-window node collections, produced by +`nodes.rolling` / `nodes.expanding`. Each entry is a `Nodes` collection +as it exists in that window. +""" type NodesWindowSet { + """ + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. + """ count: Int! 
""" Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -2705,7 +4220,24 @@ type NodesWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Nodes!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Nodes!]! + """ + Materialise every window as a list. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. + """ list: [Nodes!]! } @@ -2736,23 +4268,49 @@ type PagerankOutput { rank: Float! } +""" +A collection of nodes anchored to a source node — the result of traversals +like `node.neighbours`, `inNeighbours`, or `outNeighbours`. Supports all +the usual view transforms (window, layer, filter, ...) and can be chained +to walk further hops. +""" type PathFromNode { """ Returns a view of PathFromNode containing the specified layer, errors if the layer does not exist. """ - layers(names: [String!]!): PathFromNode! + layers( + """ + Layer names to include. + """ + names: [String!]! + ): PathFromNode! """ Return a view of PathFromNode containing all layers except the specified excluded layers, errors if any of the layers do not exist. """ - excludeLayers(names: [String!]!): PathFromNode! + excludeLayers( + """ + Layer names to exclude. + """ + names: [String!]! + ): PathFromNode! """ Return a view of PathFromNode containing the layer specified layer, errors if the layer does not exist. """ - layer(name: String!): PathFromNode! + layer( + """ + Layer name to include. + """ + name: String! + ): PathFromNode! 
""" Return a view of PathFromNode containing all layers except the specified excluded layers, errors if any of the layers do not exist. """ - excludeLayer(name: String!): PathFromNode! + excludeLayer( + """ + Layer name to exclude. + """ + name: String! + ): PathFromNode! """ Creates a WindowSet with the given window size and optional step using a rolling window. @@ -2764,7 +4322,20 @@ type PathFromNode { Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before the start of the first window and/or after the end of the last window (i.e. not included in any window). """ - rolling(window: WindowDuration!, step: WindowDuration, alignmentUnit: AlignmentUnit): PathFromNodeWindowSet! + rolling( + """ + Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`). + """ + window: WindowDuration!, + """ + Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap. + """ + step: WindowDuration, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set). + """ + alignmentUnit: AlignmentUnit + ): PathFromNodeWindowSet! """ Creates a WindowSet with the given step size using an expanding window. @@ -2774,15 +4345,38 @@ type PathFromNode { If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. e.g. "1 month and 1 day" will align at the start of the day. """ - expanding(step: WindowDuration!, alignmentUnit: AlignmentUnit): PathFromNodeWindowSet! 
+ expanding( + """ + How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`). + """ + step: WindowDuration!, + """ + Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`. + """ + alignmentUnit: AlignmentUnit + ): PathFromNodeWindowSet! """ Create a view of the PathFromNode including all events between a specified start (inclusive) and end (exclusive). """ - window(start: TimeInput!, end: TimeInput!): PathFromNode! + window( + """ + Inclusive lower bound. + """ + start: TimeInput!, + """ + Exclusive upper bound. + """ + end: TimeInput! + ): PathFromNode! """ Create a view of the PathFromNode including all events at time. """ - at(time: TimeInput!): PathFromNode! + at( + """ + Instant to pin the view to. + """ + time: TimeInput! + ): PathFromNode! """ Create a view of the PathFromNode including all events that are valid at the latest time. """ @@ -2790,7 +4384,12 @@ type PathFromNode { """ Create a view of the PathFromNode including all events that are valid at the specified time. """ - snapshotAt(time: TimeInput!): PathFromNode! + snapshotAt( + """ + Instant at which entities must be valid. + """ + time: TimeInput! + ): PathFromNode! """ Create a view of the PathFromNode including all events at the latest time. """ @@ -2798,27 +4397,61 @@ type PathFromNode { """ Create a view of the PathFromNode including all events before the specified end (exclusive). """ - before(time: TimeInput!): PathFromNode! + before( + """ + Exclusive upper bound. + """ + time: TimeInput! + ): PathFromNode! """ Create a view of the PathFromNode including all events after the specified start (exclusive). """ - after(time: TimeInput!): PathFromNode! + after( + """ + Exclusive lower bound. + """ + time: TimeInput! 
+ ): PathFromNode! """ Shrink both the start and end of the window. """ - shrinkWindow(start: TimeInput!, end: TimeInput!): PathFromNode! + shrinkWindow( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput!, + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): PathFromNode! """ Set the start of the window to the larger of the specified start and self.start(). """ - shrinkStart(start: TimeInput!): PathFromNode! + shrinkStart( + """ + Proposed new start (TimeInput); ignored if it would widen the window. + """ + start: TimeInput! + ): PathFromNode! """ Set the end of the window to the smaller of the specified end and self.end(). """ - shrinkEnd(end: TimeInput!): PathFromNode! + shrinkEnd( + """ + Proposed new end (TimeInput); ignored if it would widen the window. + """ + end: TimeInput! + ): PathFromNode! """ - Filter nodes by type. + Narrow this path to neighbours whose node type is in the given set. """ - typeFilter(nodeTypes: [String!]!): PathFromNode! + typeFilter( + """ + Node types to keep. + """ + nodeTypes: [String!]! + ): PathFromNode! """ Returns the earliest time that this PathFromNode is valid or None if the PathFromNode is valid for all times. """ @@ -2827,6 +4460,9 @@ type PathFromNode { Returns the latest time that this PathFromNode is valid or None if the PathFromNode is valid for all times. """ end: EventTime! + """ + Number of neighbour nodes reachable from the source in this view. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -2835,24 +4471,84 @@ type PathFromNode { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [Node!]! + page( + """ + Maximum number of items to return on this page. 
+ """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [Node!]! + """ + Materialise every neighbour node in the path. Rejected by the server when + bulk list endpoints are disabled; use `page` for paginated access instead. + """ list: [Node!]! """ - Returns the node ids. + Every neighbour node's id (name) as a flat list of strings. Rejected by the + server when bulk list endpoints are disabled. """ ids: [String!]! """ Takes a specified selection of views and applies them in given order. """ - applyViews(views: [PathFromNodeViewCollection!]!): PathFromNode! - """ - Returns a filtered view that applies to list down the chain + applyViews( + """ + Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result. + """ + views: [PathFromNodeViewCollection!]! + ): PathFromNode! """ - filter(expr: NodeFilter!): PathFromNode! + Narrow the neighbour set to nodes matching `expr`. The filter sticks to + the returned path — every subsequent traversal (further hops, edges, + properties) continues to see the filtered scope. + + Useful when you want one scoping rule to apply across the whole query. + E.g. restricting the whole traversal to a specific week: + + ```text + node(name: "A") { neighbours { filter(expr: {window: {...week...}}) { + list { neighbours { list { name } } } # further hops still windowed + } } } + ``` + + Contrast with `select`, which applies here and is not carried through. """ - Returns filtered list of neighbour nodes + filter( + """ + Composite node filter (by name, property, type, etc.). + """ + expr: NodeFilter! + ): PathFromNode! """ - select(expr: NodeFilter!): PathFromNode! 
+ Narrow the neighbour set to nodes matching `expr`, but only at this hop + — further traversals out of these nodes see the unfiltered graph again. + + Useful when each hop needs a different scope. E.g. neighbours active on + Monday, then *their* neighbours active on Tuesday: + + ```text + node(name: "A") { neighbours { select(expr: {window: {...monday...}}) { + list { neighbours { select(expr: {window: {...tuesday...}}) { + list { name } + } } } + } } } + ``` + + Contrast with `filter`, which persists the scope through subsequent ops. + """ + select( + """ + Composite node filter (by name, property, type, etc.). + """ + expr: NodeFilter! + ): PathFromNode! } input PathFromNodeViewCollection @oneOf { @@ -2910,7 +4606,16 @@ input PathFromNodeViewCollection @oneOf { shrinkEnd: TimeInput } +""" +A lazy sequence of per-window neighbour sets, produced by +`neighbours.rolling` / `neighbours.expanding` (or the in/out variants). +Each entry is a `PathFromNode` scoped to that window. +""" type PathFromNodeWindowSet { + """ + Number of windows in this set. Materialising all windows is expensive for + large graphs — prefer `page` over `list` when iterating. + """ count: Int! """ Fetch one page with a number of items up to a specified limit, optionally offset by a specified amount. @@ -2919,7 +4624,24 @@ type PathFromNodeWindowSet { For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), will be returned. """ - page(limit: Int!, offset: Int, pageIndex: Int): [PathFromNode!]! + page( + """ + Maximum number of items to return on this page. + """ + limit: Int!, + """ + Extra items to skip on top of `pageIndex` paging (default 0). + """ + offset: Int, + """ + Zero-based page number; multiplies `limit` to determine where to start (default 0). + """ + pageIndex: Int + ): [PathFromNode!]! + """ + Materialise every window as a list. Rejected by the server when bulk list + endpoints are disabled; use `page` for paginated access instead. 
+ """ list: [PathFromNode!]! } @@ -3053,29 +4775,73 @@ input PropCondition @oneOf { len: PropCondition } +""" +All properties (temporal + constant) of an entity, excluding metadata. +Look up individual properties via `get` / `contains`, enumerate via +`keys` / `values`, or drop down to `temporal` for the time-aware surface. +""" type Properties { """ - Get property value matching the specified key. + Look up a single property by key. Returns null if no property with that key + exists in the current view. """ - get(key: String!): Property + get( + """ + The property name. + """ + key: String! + ): Property """ - Check if the key is in the properties. + Returns true if a property with the given key exists in this view. """ - contains(key: String!): Boolean! + contains( + """ + The property name to look up. + """ + key: String! + ): Boolean! """ - Return all property keys. + All property keys present in the current view. Does not include metadata + — metadata is a separate surface accessed via the entity's `metadata` + field. """ keys: [String!]! """ - Return all property values. + Snapshot of property values, one `{key, value}` entry per property. + """ + values( + """ + Optional whitelist. If provided, only properties with these keys are returned; if omitted or null, every property in the view is returned. + """ + keys: [String!] + ): [Property!]! + """ + The temporal-only view of these properties — excludes metadata (which has no + history) and lets you drill into per-key timelines and aggregates. """ - values(keys: [String!]): [Property!]! temporal: TemporalProperties! } +""" +A single `(key, value)` property reading at a point in the graph view. +The value is exposed both as a typed scalar (`value`) and as a +human-readable string (`asString`). +""" type Property { + """ + The property key (name). + """ key: String! + """ + The property value rendered as a human-readable string (e.g. `"10"`, `"hello"`, + `"2024-01-01T00:00:00Z"`). 
For programmatic access use `value`, which returns + a typed scalar. + """ asString: String! + """ + The property value as a typed `PropertyOutput` scalar — numbers come back as + numbers, booleans as booleans, strings as strings, etc. + """ value: PropertyOutput! } @@ -3126,9 +4892,25 @@ type PropertySchema { variants: [String!]! } +""" +A `(time, value)` pair — the output type of temporal-property accessors +that need to report *when* a value was observed (e.g. `min`, `max`, +`median`, `orderedDedupe`). +""" type PropertyTuple { + """ + The timestamp at which this value was recorded. + """ time: EventTime! + """ + The value rendered as a human-readable string. For programmatic access use + `value`, which returns a typed scalar. + """ asString: String! + """ + The value as a typed `PropertyOutput` scalar — numbers come back as numbers, + booleans as booleans, etc. + """ value: PropertyOutput! } @@ -3147,38 +4929,85 @@ type QueryPlugin { NoOps: String! } +""" +Top-level read-only query surface. Entry points for loading a graph +(`graph`, `graphMetadata`), browsing stored graphs (`namespaces`, +`namespace`, `root`), encoding one for transport (`receiveGraph`), +inspecting vectorised variants (`vectorisedGraph`), and a few utility +endpoints (`version`, `hello`, `plugins`). +""" type QueryRoot { """ - Hello world demo + Liveness check — returns a static "hello world" string. Useful for + smoke-testing that the GraphQL server is reachable. """ hello: String! """ - Returns a graph - """ - graph(path: String!): Graph - """ - Returns lightweight metadata for a graph (node/edge counts, timestamps) without loading it. - Requires at least INTROSPECT permission. - """ - graphMetadata(path: String!): MetaGraph - """ - Update graph query, has side effects to update graph state - - Returns:: GqlMutableGraph - """ - updateGraph(path: String!): MutableGraph! 
- """ - Update graph query, has side effects to update graph state - - Returns:: GqlMutableGraph - """ - vectoriseGraph(path: String!, model: EmbeddingModel, nodes: Template, edges: Template): Boolean! - """ - Create vectorised graph in the format used for queries - - Returns:: GqlVectorisedGraph - """ - vectorisedGraph(path: String!): VectorisedGraph + Load a graph by path, returning null if the caller lacks read permission or + the graph doesn't exist. When a read-scoped filter is attached to the + caller's permissions, that filter is applied before the graph is returned. + """ + graph( + """ + Graph path relative to the root namespace (e.g. `"master"` or `"team/project/graph"`). + """ + path: String! + ): Graph + """ + Returns lightweight metadata for a graph (node/edge counts, + timestamps) without deserialising the full graph. Requires at least + INTROSPECT permission. + """ + graphMetadata( + """ + Graph path relative to the root namespace. + """ + path: String! + ): MetaGraph + """ + Open a graph for writing — returns a `MutableGraph` handle that can + add nodes/edges/properties/metadata. Requires write permission. + """ + updateGraph( + """ + Graph path relative to the root namespace. + """ + path: String! + ): MutableGraph! + """ + Compute and persist embeddings for the nodes and edges of a stored + graph so it can be queried via `vectorisedGraph`. Requires JWT write + access. + """ + vectoriseGraph( + """ + Graph path relative to the root namespace. + """ + path: String!, + """ + Optional embedding model; defaults to OpenAI's standard model. + """ + model: EmbeddingModel, + """ + Optional node-document template (which fields go into each node's text representation); defaults to the built-in template. + """ + nodes: Template, + """ + Optional edge-document template; defaults to the built-in template. + """ + edges: Template + ): Boolean! + """ + Open a previously-vectorised graph for similarity queries. 
Returns null + if the graph has no embeddings (call `vectoriseGraph` first) or the + caller lacks read permission. + """ + vectorisedGraph( + """ + Graph path relative to the root namespace. + """ + path: String! + ): VectorisedGraph """ Returns all namespaces using recursive search @@ -3186,11 +5015,15 @@ type QueryRoot { """ namespaces: CollectionOfNamespace! """ - Returns a specific namespace at a given path - - Returns:: Namespace or error if no namespace found + Return a specific namespace by path. Errors if no namespace exists at + that path. """ - namespace(path: String!): Namespace! + namespace( + """ + Namespace path relative to the root namespace (e.g. `"team/project"`). + """ + path: String! + ): Namespace! """ Returns root namespace @@ -3198,16 +5031,25 @@ type QueryRoot { """ root: Namespace! """ - Returns a plugin. + Entry point for read-only plugins registered with the server (e.g. graph + algorithms exposed as queries). Available plugins are defined at server + startup via the plugin registry. """ plugins: QueryPlugin! """ - Encodes graph and returns as string. - If the caller has filtered access, the returned graph is a materialized view of the filter. - - Returns:: Base64 url safe encoded string + Encode a stored graph as a base64 string for client-side download. If + the caller has filtered read access, only the materialised filtered + view is encoded. + """ + receiveGraph( + """ + Graph path relative to the root namespace. + """ + path: String! + ): String! + """ + Version string of the running `raphtory-graphql` server build. """ - receiveGraph(path: String!): String! version: String! } @@ -3252,39 +5094,98 @@ input Template @oneOf { custom: String } +""" +The temporal-only view of an entity's properties. Each entry is a +`TemporalProperty` carrying the full timeline for that key — use this when +you need per-update iteration, time-indexed lookups, or aggregates. 
+""" type TemporalProperties { """ - Get property value matching the specified key. + Look up a single temporal property by key. Returns null if there's no temporal + property with that key. """ - get(key: String!): TemporalProperty + get( + """ + The property name. + """ + key: String! + ): TemporalProperty """ - Check if the key is in the properties. + Returns true if a temporal property with the given key exists. """ - contains(key: String!): Boolean! + contains( + """ + The property name to look up. + """ + key: String! + ): Boolean! """ - Return all property keys. + All temporal-property keys present in this view. """ keys: [String!]! """ - Return all property values. + All temporal properties, each as a `TemporalProperty` with its full timeline + available. Use `history`, `values`, `latest`, `at`, etc. on each entry. """ - values(keys: [String!]): [TemporalProperty!]! + values( + """ + Optional whitelist. If provided, only temporal properties with these keys are returned; if omitted, every temporal property in the view is returned. + """ + keys: [String!] + ): [TemporalProperty!]! } +""" +The full timeline of a single property key on one entity. Exposes every +update (via `values` / `history` / `orderedDedupe`), point lookups (`at`, +`latest`), and aggregates over the timeline (`sum`, `mean`, `min`, `max`, +`median`, `count`). +""" type TemporalProperty { """ - Key of a property. + The property key (name). """ key: String! + """ + Event history for this property — one entry per temporal update, in + insertion order. Use this to navigate the full timeline: access the + raw `timestamps` / `datetimes` / `eventId` lists, analyse gaps between + updates via `intervals` (mean/median/min/max), ask `isEmpty`, or + paginate the events. + """ history: History! """ - Return the values of the properties. + All values this property has ever taken, in temporal order (one per update). + Typed as `PropertyOutput` so numeric values stay numeric. """ values: [PropertyOutput!]! 
- at(t: TimeInput!): PropertyOutput + """ + The value at or before time `t` (latest update on or before `t`). Returns null + if no update exists on or before `t`. + """ + at( + """ + A TimeInput (epoch millis integer, RFC3339 string, or `{timestamp, eventId}` object). + """ + t: TimeInput! + ): PropertyOutput + """ + The most recent value, or null if the property has never been set in this view. + """ latest: PropertyOutput + """ + The set of distinct values this property has ever taken (order not guaranteed). + """ unique: [PropertyOutput!]! - orderedDedupe(latestTime: Boolean!): [PropertyTuple!]! + """ + Collapses runs of consecutive-equal updates into a single `(time, value)` pair. + """ + orderedDedupe( + """ + If true, each run is represented by its *last* timestamp; if false, by its *first*. Useful for compressing chatter in a timeline. + """ + latestTime: Boolean! + ): [PropertyTuple!]! """ Sum of all updates. Returns null if the dtype is not additive or the property is empty. """ @@ -3295,7 +5196,7 @@ type TemporalProperty { """ mean: PropertyOutput """ - Alias for `mean`. + Alias for `mean` — same F64 average, same null cases. """ average: PropertyOutput """ @@ -3314,16 +5215,17 @@ type TemporalProperty { """ median: PropertyTuple """ - Number of updates. + Number of updates recorded for this property in the current view. """ count: Int! } input TemporalPropertyInput { """ - Time. + Time of the update — accepts the same forms as `TimeInput` (epoch + millis Int, RFC3339 string, or `{timestamp, eventId}` object). """ - time: Int! + time: TimeInput! """ Properties. """ @@ -3406,6 +5308,12 @@ input Value @oneOf { decimal: String } +""" +A working set of documents / nodes / edges built up via similarity +searches on a `VectorisedGraph`. Selections are mutable: you can grow +them with more hops (`expand*`), dereference the contents (`nodes`, +`edges`, `getDocuments`), or start fresh with `emptySelection`. 
+""" type VectorSelection { """ Returns a list of nodes in the current selection. @@ -3420,40 +5328,109 @@ type VectorSelection { """ getDocuments: [Document!]! """ - Adds all the documents associated with the specified nodes to the current selection. - - Documents added by this call are assumed to have a score of 0. - """ - addNodes(nodes: [String!]!): VectorSelection! - """ - Adds all the documents associated with the specified edges to the current selection. - - Documents added by this call are assumed to have a score of 0. - """ - addEdges(edges: [InputEdge!]!): VectorSelection! - """ - Add all the documents a specified number of hops away to the selection. - - Two documents A and B are considered to be 1 hop away of each other if they are on the same entity or if they are on the same node and edge pair. - """ - expand(hops: Int!, window: VectorisedGraphWindow): VectorSelection! - """ - Adds documents, from the set of one hop neighbours to the current selection, to the selection based on their similarity score with the specified query. This function loops so that the set of one hop neighbours expands on each loop and number of documents added is determined by the specified limit. - """ - expandEntitiesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! - """ - Add the adjacent nodes with higher score for query to the selection up to a specified limit. This function loops like expand_entities_by_similarity but is restricted to nodes. - """ - expandNodesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! - """ - Add the adjacent edges with higher score for query to the selection up to a specified limit. This function loops like expand_entities_by_similarity but is restricted to edges. - """ - expandEdgesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! + Add every document associated with the named nodes to the selection. 
+ Documents added this way receive a score of 0 (no similarity ranking). + """ + addNodes( + """ + Node ids whose documents to include. + """ + nodes: [NodeId!]! + ): VectorSelection! + """ + Add every document associated with the named edges to the selection. + Documents added this way receive a score of 0 (no similarity ranking). + """ + addEdges( + """ + List of `{src, dst}` pairs identifying the edges. + """ + edges: [InputEdge!]! + ): VectorSelection! + """ + Grow the selection by including documents that are within `hops` of any + document already in the selection. Two documents are 1 hop apart if + they're on the same entity or on a connected node/edge pair. + """ + expand( + """ + Number of expansion rounds (1 = direct neighbours). + """ + hops: Int!, + """ + Optional `{start, end}` to restrict expansion to entities active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! + """ + Iteratively expand the selection by similarity to a natural-language + query. Each pass takes the one-hop neighbour set of the current + selection and adds the highest-scoring entities (mixed nodes and + edges); the loop continues until `limit` entities have been added. + """ + expandEntitiesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Total number of entities to add across all passes. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to entities active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! + """ + Like `expandEntitiesBySimilarity` but restricted to nodes — iteratively + add the highest-scoring adjacent nodes to the selection. + """ + expandNodesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Total number of nodes to add across all passes. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to nodes active in that interval. 
+ """ + window: VectorisedGraphWindow + ): VectorSelection! + """ + Like `expandEntitiesBySimilarity` but restricted to edges — iteratively + add the highest-scoring adjacent edges to the selection. + """ + expandEdgesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Total number of edges to add across all passes. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to edges active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! } +""" +A graph with embedded vector representations for its nodes and edges. +Exposes similarity search over documents, nodes, and edges, plus +selection building (`emptySelection`) and index maintenance +(`optimizeIndex`). +""" type VectorisedGraph { """ - Optmize the vector index + Rebuild (or incrementally update) the on-disk vector indexes for nodes + and edges so subsequent similarity searches hit the fresh embeddings. + Safe to call repeatedly; returns true on success. """ optimizeIndex: Boolean! """ @@ -3461,28 +5438,73 @@ type VectorisedGraph { """ emptySelection: VectorSelection! """ - Search the top scoring entities according to a specified query returning no more than a specified limit of entities. - """ - entitiesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! - """ - Search the top scoring nodes according to a specified query returning no more than a specified limit of nodes. - """ - nodesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! - """ - Search the top scoring edges according to a specified query returning no more than a specified limit of edges. - """ - edgesBySimilarity(query: String!, limit: Int!, window: VectorisedGraphWindow): VectorSelection! + Find the highest-scoring nodes *and* edges (mixed) by similarity to a + natural-language query. The query is embedded server-side and matched + against indexed entity vectors. 
+ """ + entitiesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Maximum number of results to return. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to entities active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! + """ + Find the highest-scoring nodes by similarity to a natural-language + query. The query is embedded server-side and matched against indexed + node vectors. + """ + nodesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Maximum number of nodes to return. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to nodes active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! + """ + Find the highest-scoring edges by similarity to a natural-language + query. The query is embedded server-side and matched against indexed + edge vectors. + """ + edgesBySimilarity( + """ + Natural-language search string; embedded by the server. + """ + query: String!, + """ + Maximum number of edges to return. + """ + limit: Int!, + """ + Optional `{start, end}` to restrict matches to edges active in that interval. + """ + window: VectorisedGraphWindow + ): VectorSelection! } input VectorisedGraphWindow { """ - Start time. + Inclusive lower bound of the search window. """ - start: Int! + start: TimeInput! """ - End time. + Exclusive upper bound of the search window. """ - end: Int! + end: TimeInput! } input Window { @@ -3529,4 +5551,3 @@ schema { query: QueryRoot mutation: MutRoot } - diff --git a/raphtory-graphql/src/cli.rs b/raphtory-graphql/src/cli.rs index 4608de10fd..56a7bbe242 100644 --- a/raphtory-graphql/src/cli.rs +++ b/raphtory-graphql/src/cli.rs @@ -248,6 +248,10 @@ pub async fn cli() -> IoResult<()> { cli_with_args(std::env::args_os()).await } +/// Run the Raphtory GraphQL CLI from Python. Uses `sys.argv` for arguments. 
+/// +/// Returns: +/// None: #[cfg(feature = "python")] #[pyo3::pyfunction(name = "cli")] pub fn python_cli() -> pyo3::PyResult<()> { diff --git a/raphtory-graphql/src/client/raphtory_client.rs b/raphtory-graphql/src/client/raphtory_client.rs index 18c58bfa86..c8af4c36ae 100644 --- a/raphtory-graphql/src/client/raphtory_client.rs +++ b/raphtory-graphql/src/client/raphtory_client.rs @@ -342,7 +342,8 @@ impl RaphtoryGraphQLClient { GraphQLRemoteGraph::new(path, self.clone()) } - /// Create index on the server. `index_spec` must serialize to the GraphQL IndexSpecInput shape. + /// Create index on the server. `index_spec` must serialize to a value + /// compatible with the GraphQL `IndexSpecInput` type. pub async fn create_index( &self, path: &str, diff --git a/raphtory-graphql/src/lib.rs b/raphtory-graphql/src/lib.rs index 0315aaf19c..dbcbae779d 100644 --- a/raphtory-graphql/src/lib.rs +++ b/raphtory-graphql/src/lib.rs @@ -509,7 +509,6 @@ mod graphql_test { #[tokio::test] async fn test_unique_temporal_properties() { - // TODO: this doesn't test anything? let g = Graph::new(); g.add_metadata([("name", "graph")]).unwrap(); g.add_properties(1, [("state", "abc")]).unwrap(); @@ -537,127 +536,69 @@ mod graphql_test { let data = Data::new(tmp_dir.path(), &AppConfig::default(), Config::default()); save_graphs_to_work_dir(&data, &graphs).await.unwrap(); - let expected = json!({ - "graph": { - "properties": { - "temporal": { - "values": [ - { - "unique": [ - "xyz", - "abc" - ] - } - ] - } - }, - "node": { - "properties": { - "temporal": { - "values": [ - { - "unique": [ - "fax", - "phone" - ] - } - ] - } + let schema = App::create_schema().data(data).finish().unwrap(); + + // Query each `unique` by key so we can assert the typed element shape + // (strings for string props, bools for bool props — not stringified). 
+ let query = r#" + { + graph(path: "graph") { + properties { + temporal { + get(key: "state") { unique } + } + } + node(name: "3") { + properties { + temporal { + get(key: "name") { unique } } - }, - "edge": { - "properties": { - "temporal": { - "values": [ - { - "unique": [ - "open", - "review", - "in-progress" - ] - }, - { - "unique": [ - "false", - "true" - ] - } - ] - } + } + } + edge(src: "1", dst: "2") { + properties { + temporal { + status: get(key: "status") { unique } + state: get(key: "state") { unique } } } } - }); + } + } + "#; - let mut actual_graph_props = HashSet::new(); - let mut actual_node_props = HashSet::new(); - let mut actual_edge_props = HashSet::new(); + let req = Request::new(query); + let res = schema.execute(req).await; + assert!(res.errors.is_empty(), "errors: {:?}", res.errors); + let data = res.data.into_json().unwrap(); - let graph_props = &expected["graph"]["properties"]["temporal"]["values"]; - for value in graph_props.as_array().unwrap().iter() { - let unique_values: HashSet<_> = value["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap()) - .collect(); - actual_graph_props.extend(unique_values); + fn sorted_unique<'a>(v: &'a Value) -> Vec<&'a Value> { + let mut out: Vec<&Value> = v["unique"].as_array().unwrap().iter().collect(); + // serde_json::Value has a deterministic total order for same-typed values + // and groups by type for mixed inputs — fine for this test. 
+ out.sort_by(|a, b| a.to_string().cmp(&b.to_string())); + out } - let node_props = &expected["graph"]["node"]["properties"]["temporal"]["values"]; - for value in node_props.as_array().unwrap().iter() { - let unique_values: HashSet<_> = value["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap()) - .collect(); - actual_node_props.extend(unique_values); - } + // graph-level `state` is a string property + let state = sorted_unique(&data["graph"]["properties"]["temporal"]["get"]); + assert_eq!(state, vec![&json!("abc"), &json!("xyz")]); - let edge_props = &expected["graph"]["edge"]["properties"]["temporal"]["values"]; - for value in edge_props.as_array().unwrap().iter() { - let unique_values: HashSet<_> = value["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap()) - .collect(); - actual_edge_props.extend(unique_values); - } + // node-level `name` is a string property + let name = sorted_unique(&data["graph"]["node"]["properties"]["temporal"]["get"]); + assert_eq!(name, vec![&json!("fax"), &json!("phone")]); + // edge-level `status` is a string property + let status = sorted_unique(&data["graph"]["edge"]["properties"]["temporal"]["status"]); assert_eq!( - actual_graph_props, - expected["graph"]["properties"]["temporal"]["values"][0]["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap()) - .collect::>() - ); - assert_eq!( - actual_node_props, - expected["graph"]["node"]["properties"]["temporal"]["values"][0]["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap()) - .collect::>() - ); - assert_eq!( - actual_edge_props, - expected["graph"]["edge"]["properties"]["temporal"]["values"] - .as_array() - .unwrap() - .iter() - .map(|value| value["unique"] - .as_array() - .unwrap() - .iter() - .map(|v| v.as_str().unwrap())) - .flatten() - .collect::>() + status, + vec![&json!("in-progress"), &json!("open"), &json!("review")] ); + + // edge-level `state` is a bool property — must come back 
as JSON bools, + // not strings "true" / "false". + let edge_state = sorted_unique(&data["graph"]["edge"]["properties"]["temporal"]["state"]); + assert_eq!(edge_state, vec![&json!(false), &json!(true)]); } #[tokio::test] diff --git a/raphtory-graphql/src/model/graph/collection.rs b/raphtory-graphql/src/model/graph/collection.rs index 21e1673239..f2b5b546d8 100644 --- a/raphtory-graphql/src/model/graph/collection.rs +++ b/raphtory-graphql/src/model/graph/collection.rs @@ -89,11 +89,16 @@ where /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> Result> { check_page_limit(ctx, limit)?; diff --git a/raphtory-graphql/src/model/graph/edge.rs b/raphtory-graphql/src/model/graph/edge.rs index db1d8b02f6..29fd37c6d4 100644 --- a/raphtory-graphql/src/model/graph/edge.rs +++ b/raphtory-graphql/src/model/graph/edge.rs @@ -4,6 +4,7 @@ use crate::{ filtering::{EdgeViewCollection, GqlEdgeFilter}, history::GqlHistory, node::GqlNode, + node_id::GqlNodeId, property::{GqlMetadata, GqlProperties}, timeindex::{GqlEventTime, GqlTimeInput}, windowset::GqlEdgeWindowSet, @@ -61,7 +62,11 @@ impl GqlEdge { /// Returns a view of Edge containing all layers in the list of names. /// /// Errors if any of the layers do not exist. 
- async fn layers(&self, names: Vec) -> GqlEdge { + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> GqlEdge { let self_clone = self.clone(); blocking_compute(move || self_clone.ee.valid_layers(names).into()).await } @@ -69,7 +74,11 @@ impl GqlEdge { /// Returns a view of Edge containing all layers except the excluded list of names. /// /// Errors if any of the layers do not exist. - async fn exclude_layers(&self, names: Vec) -> GqlEdge { + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> GqlEdge { let self_clone = self.clone(); blocking_compute(move || self_clone.ee.exclude_valid_layers(names).into()).await } @@ -77,14 +86,19 @@ impl GqlEdge { /// Returns a view of Edge containing the specified layer. /// /// Errors if any of the layers do not exist. - async fn layer(&self, name: String) -> GqlEdge { + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> GqlEdge { self.ee.valid_layers(name).into() } /// Returns a view of Edge containing all layers except the excluded layer specified. /// /// Errors if any of the layers do not exist. - async fn exclude_layer(&self, name: String) -> GqlEdge { + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> GqlEdge { self.ee.exclude_valid_layers(name).into() } @@ -97,10 +111,20 @@ impl GqlEdge { /// e.g. "1 month and 1 day" will align at the start of the day. /// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. 
`{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." + )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." + )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -120,9 +144,16 @@ impl GqlEdge { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." + )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -137,16 +168,28 @@ impl GqlEdge { /// Creates a view of the Edge including all events between the specified start (inclusive) and end (exclusive). /// /// For persistent graphs, any edge which exists at any point during the window will be included. You may want to restrict this to only edges that are present at the end of the window using the is_valid function. 
- async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> GqlEdge { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> GqlEdge { self.ee.window(start.into_time(), end.into_time()).into() } /// Creates a view of the Edge including all events at a specified time. - async fn at(&self, time: GqlTimeInput) -> GqlEdge { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> GqlEdge { self.ee.at(time.into_time()).into() } - /// Returns a view of the edge at the latest time of the graph. + /// View of this edge pinned to the graph's latest time — equivalent to + /// `at(graph.latestTime)`. The edge's properties and metadata show their + /// most recent values, and (for persistent graphs) validity is evaluated + /// at that instant. async fn latest(&self) -> GqlEdge { self.ee.latest().into() } @@ -154,7 +197,11 @@ impl GqlEdge { /// Creates a view of the Edge including all events that are valid at time. /// /// This is equivalent to before(time + 1) for Graph and at(time) for PersistentGraph. - async fn snapshot_at(&self, time: GqlTimeInput) -> GqlEdge { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> GqlEdge { self.ee.snapshot_at(time.into_time()).into() } @@ -166,34 +213,66 @@ impl GqlEdge { } /// Creates a view of the Edge including all events before a specified end (exclusive). - async fn before(&self, time: GqlTimeInput) -> GqlEdge { + + async fn before( + &self, + #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput, + ) -> GqlEdge { self.ee.before(time.into_time()).into() } /// Creates a view of the Edge including all events after a specified start (exclusive). 
- async fn after(&self, time: GqlTimeInput) -> GqlEdge { + + async fn after( + &self, + #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput, + ) -> GqlEdge { self.ee.after(time.into_time()).into() } /// Shrinks both the start and end of the window. - async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.ee .shrink_window(start.into_time(), end.into_time()) .into() } /// Set the start of the window. - async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + ) -> Self { self.ee.shrink_start(start.into_time()).into() } /// Set the end of the window. - async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.ee.shrink_end(end.into_time()).into() } /// Takes a specified selection of views and applies them in given order. - async fn apply_views(&self, views: Vec) -> Result { + + async fn apply_views( + &self, + #[graphql( + desc = "Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result." + )] + views: Vec, + ) -> Result { let mut return_view: GqlEdge = self.ee.clone().into(); for view in views { return_view = match view { @@ -248,6 +327,10 @@ impl GqlEdge { self.ee.earliest_time().into() } + /// The timestamp of the first event in this edge's history (first update, first + /// deletion, or anything in between). 
Differs from `earliestTime` in that + /// `earliestTime` reports when the edge is first *valid*; `firstUpdate` reports + /// when its history actually begins. async fn first_update(&self) -> GqlEventTime { let self_clone = self.clone(); blocking_compute(move || self_clone.ee.history().earliest_time().into()).await @@ -258,6 +341,10 @@ impl GqlEdge { self.ee.latest_time().into() } + /// The timestamp of the last event in this edge's history (last update, last + /// deletion, or anything in between). Differs from `latestTime` in that + /// `latestTime` reports when the edge is last *valid*; `lastUpdate` reports + /// when its history actually ends. async fn last_update(&self) -> GqlEventTime { let self_clone = self.clone(); blocking_compute(move || self_clone.ee.history().latest_time().into()).await @@ -302,13 +389,12 @@ impl GqlEdge { self.ee.nbr().into() } - /// Returns the id of the edge. - /// - /// Returns: - /// list[str]: - async fn id(&self) -> Vec { - let (src_name, dst_name) = self.ee.id(); - vec![src_name.to_string(), dst_name.to_string()] + /// Returns the `[src, dst]` id pair of the edge. Each id is a `String` + /// for string-indexed graphs or a non-negative `Int` for integer-indexed + /// graphs. + async fn id(&self) -> Vec { + let (src_id, dst_id) = self.ee.id(); + vec![GqlNodeId(src_id), GqlNodeId(dst_id)] } /// Returns a view of the properties of the edge. @@ -393,7 +479,14 @@ impl GqlEdge { self.ee.is_self_loop() } - async fn filter(&self, expr: GqlEdgeFilter) -> Result { + /// Apply an edge filter in place, returning an edge view whose properties / + /// metadata / history are restricted to the matching subset. 
+ + async fn filter( + &self, + #[graphql(desc = "Composite edge filter (by property, layer, src/dst, etc.).")] + expr: GqlEdgeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeEdgeFilter = expr.try_into()?; diff --git a/raphtory-graphql/src/model/graph/edges.rs b/raphtory-graphql/src/model/graph/edges.rs index e393c327e4..084e2d37c3 100644 --- a/raphtory-graphql/src/model/graph/edges.rs +++ b/raphtory-graphql/src/model/graph/edges.rs @@ -32,6 +32,9 @@ use raphtory::db::{ api::view::Filter, graph::views::filter::model::edge_filter::CompositeEdgeFilter, }; +/// A lazy collection of edges from a graph view. Supports the usual view +/// transforms (window, layer, filter, ...), plus edge-specific ones like +/// `explode` and `explodeLayers`, pagination, and sorting. #[derive(ResolvedObject, Clone)] #[graphql(name = "Edges")] pub(crate) struct GqlEdges { @@ -70,24 +73,37 @@ impl GqlEdges { } /// Returns a collection containing only edges belonging to the listed layers. - async fn layers(&self, names: Vec) -> Self { + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.ee.valid_layers(names))).await } /// Returns a collection containing edges belonging to all layers except the excluded list of layers. - async fn exclude_layers(&self, names: Vec) -> Self { + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.ee.exclude_valid_layers(names))).await } /// Returns a collection containing edges belonging to the specified layer. 
- async fn layer(&self, name: String) -> Self { + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> Self { self.update(self.ee.valid_layers(name)) } /// Returns a collection containing edges belonging to all layers except the excluded layer specified. - async fn exclude_layer(&self, name: String) -> Self { + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> Self { self.update(self.ee.exclude_valid_layers(name)) } @@ -100,10 +116,20 @@ impl GqlEdges { /// e.g. "1 month and 1 day" will align at the start of the day. /// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." + )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." + )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -123,9 +149,16 @@ impl GqlEdges { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. 
by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." + )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -138,15 +171,25 @@ impl GqlEdges { } /// Creates a view of the Edge including all events between the specified start (inclusive) and end (exclusive). - async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> Self { self.update(self.ee.window(start.into_time(), end.into_time())) } /// Creates a view of the Edge including all events at a specified time. - async fn at(&self, time: GqlTimeInput) -> Self { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> Self { self.update(self.ee.at(time.into_time())) } + /// View showing only the latest state of each edge (equivalent to `at(latestTime)`). async fn latest(&self) -> Self { let e = self.ee.clone(); let latest = blocking_compute(move || e.latest()).await; @@ -154,7 +197,11 @@ impl GqlEdges { } /// Creates a view of the Edge including all events that are valid at time. This is equivalent to before(time + 1) for Graph and at(time) for PersistentGraph. 
- async fn snapshot_at(&self, time: GqlTimeInput) -> Self { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> Self { self.update(self.ee.snapshot_at(time.into_time())) } @@ -164,32 +211,58 @@ impl GqlEdges { } /// Creates a view of the Edge including all events before a specified end (exclusive). - async fn before(&self, time: GqlTimeInput) -> Self { + + async fn before(&self, #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput) -> Self { self.update(self.ee.before(time.into_time())) } /// Creates a view of the Edge including all events after a specified start (exclusive). - async fn after(&self, time: GqlTimeInput) -> Self { + + async fn after(&self, #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput) -> Self { self.update(self.ee.after(time.into_time())) } /// Shrinks both the start and end of the window. - async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.ee.shrink_window(start.into_time(), end.into_time())) } /// Set the start of the window. - async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + ) -> Self { self.update(self.ee.shrink_start(start.into_time())) } /// Set the end of the window. 
- async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.ee.shrink_end(end.into_time())) } /// Takes a specified selection of views and applies them in order given. - async fn apply_views(&self, views: Vec) -> Result { + + async fn apply_views( + &self, + #[graphql( + desc = "Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result." + )] + views: Vec, + ) -> Result { let mut return_view: GqlEdges = self.update(self.ee.clone()); for view in views { return_view = match view { @@ -238,7 +311,9 @@ impl GqlEdges { Ok(return_view) } - /// Returns an edge object for each update within the original edge. + /// Expand each edge into one edge per update: if `A->B` has three updates, it + /// becomes three `A->B` entries each at a distinct timestamp. Use this to + /// iterate per-event rather than per-edge. async fn explode(&self) -> Self { self.update(self.ee.explode()) } @@ -250,8 +325,16 @@ impl GqlEdges { self.update(self.ee.explode_layers()) } - /// Specify a sort order from: source, destination, property, time. You can also reverse the ordering. - async fn sorted(&self, sort_bys: Vec) -> Self { + /// Sort the edges. Multiple criteria are applied lexicographically (ties + /// on the first key break to the second, etc.). + + async fn sorted( + &self, + #[graphql( + desc = "Ordered list of sort keys. Each entry chooses exactly one of `src` / `dst` / `time` / `property`, with an optional `reverse: true` to flip order." + )] + sort_bys: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || { let sorted: Arc<[_]> = self_clone @@ -344,11 +427,16 @@ impl GqlEdges { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. 
+ async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -367,8 +455,27 @@ impl GqlEdges { Ok(blocking_compute(move || self_clone.iter().collect()).await) } - /// Returns a filtered view that applies to list down the chain - async fn filter(&self, expr: GqlEdgeFilter) -> Result { + /// Narrow the collection to edges matching `expr`. The filter sticks to the + /// returned view — every subsequent traversal through these edges (their + /// properties, their endpoints' neighbours, etc.) continues to see the + /// filtered scope. + /// + /// Useful when you want one scoping rule to apply across the whole query. + /// E.g. restricting everything to a specific week: + /// + /// ```text + /// edges { filter(expr: {window: {start: 1234, end: 5678}}) { + /// list { src { neighbours { list { name } } } } # neighbours still windowed + /// } } + /// ``` + /// + /// Contrast with `select`, which applies here and is not carried through. + + async fn filter( + &self, + #[graphql(desc = "Composite edge filter (by property, layer, src/dst, etc.).")] + expr: GqlEdgeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeEdgeFilter = expr.try_into()?; @@ -378,8 +485,30 @@ impl GqlEdges { .await } - /// Returns filtered list of edges - async fn select(&self, expr: GqlEdgeFilter) -> Result { + /// Narrow the collection to edges matching `expr`, but only at this step — + /// subsequent traversals out of these edges see the unfiltered graph again. + /// + /// Useful when you want different scopes at different hops. E.g. 
Monday's + /// edges, then the neighbours of their endpoints on Tuesday, then *those* + /// neighbours on Wednesday: + /// + /// ```text + /// edges { select(expr: {window: {...monday...}}) { + /// list { src { select(expr: {window: {...tuesday...}}) { + /// neighbours { select(expr: {window: {...wednesday...}}) { + /// neighbours { list { name } } + /// } } + /// } } } + /// } } + /// ``` + /// + /// Contrast with `filter`, which persists the scope through subsequent ops. + + async fn select( + &self, + #[graphql(desc = "Composite edge filter (by property, layer, src/dst, etc.).")] + expr: GqlEdgeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeEdgeFilter = expr.try_into()?; diff --git a/raphtory-graphql/src/model/graph/filtering.rs b/raphtory-graphql/src/model/graph/filtering.rs index b2bfe9cc8b..8f344b4964 100644 --- a/raphtory-graphql/src/model/graph/filtering.rs +++ b/raphtory-graphql/src/model/graph/filtering.rs @@ -1,4 +1,4 @@ -use crate::model::graph::{property::Value, timeindex::GqlTimeInput}; +use crate::model::graph::{node_id::GqlNodeId, property::Value, timeindex::GqlTimeInput}; use async_graphql::dynamic::ValueAccessor; use dynamic_graphql::{ internal::{ @@ -60,11 +60,11 @@ pub enum GraphViewCollection { /// Single excluded layer. ExcludeLayer(String), /// Subgraph nodes. - Subgraph(Vec), + Subgraph(Vec), /// Subgraph node types. SubgraphNodeTypes(Vec), /// List of excluded nodes. - ExcludeNodes(Vec), + ExcludeNodes(Vec), /// Valid state. Valid(bool), /// Window between a start and end time. 
diff --git a/raphtory-graphql/src/model/graph/graph.rs b/raphtory-graphql/src/model/graph/graph.rs index 927379189c..3a76bece60 100644 --- a/raphtory-graphql/src/model/graph/graph.rs +++ b/raphtory-graphql/src/model/graph/graph.rs @@ -8,6 +8,7 @@ use crate::{ filtering::{GqlEdgeFilter, GqlGraphFilter, GqlNodeFilter, GraphViewCollection}, index::GqlIndexSpec, node::GqlNode, + node_id::GqlNodeId, nodes::GqlNodes, property::{GqlMetadata, GqlProperties}, timeindex::{GqlEventTime, GqlTimeInput}, @@ -57,6 +58,9 @@ use std::{ sync::Arc, }; +/// A view of a Raphtory graph. Every field here returns either data from the +/// view or a derived view (`window`, `layer`, `at`, `filter`, ...) that you can +/// keep chaining. Views are cheap — they don't copy the underlying data. #[derive(ResolvedObject, Clone)] #[graphql(name = "Graph")] pub(crate) struct GqlGraph { @@ -97,58 +101,101 @@ impl GqlGraph { //////////////////////// /// Returns the names of all layers in the graphview. + /// Distinct layer names observed in the current view — any layer that has at + /// least one edge event visible here. Excludes layers that exist elsewhere in + /// the graph but whose edges have been filtered out. async fn unique_layers(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || self_clone.graph.unique_layers().map_into().collect()).await } - /// Returns a view containing only the default layer. + /// View restricted to the default layer — where nodes and edges end up + /// when `addNode` / `addEdge` is called without a `layer` argument. + /// Useful for separating "unlayered" base-graph events from named-layer + /// ones. async fn default_layer(&self) -> GqlGraph { self.apply(|g| g.default_layer()) } - /// Returns a view containing all the specified layers. - async fn layers(&self, names: Vec) -> GqlGraph { + /// View restricted to the named layers. 
Updates on any other layer are hidden; + /// if that leaves a node or edge with no updates left, it disappears from the + /// view. + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> GqlGraph { let self_clone = self.clone(); blocking_compute(move || self_clone.apply(|g| g.valid_layers(names.clone()))).await } - /// Returns a view containing all layers except the specified excluded layers. - async fn exclude_layers(&self, names: Vec) -> GqlGraph { + /// View with the named layers hidden. Updates on those layers are removed; if + /// that leaves a node or edge with no updates left, it disappears from the + /// view. + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> GqlGraph { let self_clone = self.clone(); blocking_compute(move || self_clone.apply(|g| g.exclude_valid_layers(names.clone()))).await } - /// Returns a view containing the layer specified. - async fn layer(&self, name: String) -> GqlGraph { + /// View restricted to a single layer. Convenience form of + /// `layers(names: [name])` — updates on any other layer are hidden, and + /// entities with nothing left disappear. + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> GqlGraph { self.apply(|g| g.valid_layers(name.clone())) } - /// Returns a view containing all layers except the specified excluded layer. - async fn exclude_layer(&self, name: String) -> GqlGraph { + /// View with one layer hidden. Convenience form of + /// `excludeLayers(names: [name])` — updates on that layer are removed, and + /// entities with nothing left disappear. + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> GqlGraph { self.apply(|g| g.exclude_valid_layers(name.clone())) } - /// Returns a subgraph of a specified set of nodes which contains only the edges that connect nodes of the subgraph to each other. 
- async fn subgraph(&self, nodes: Vec) -> GqlGraph { + /// View restricted to a chosen set of nodes and the edges between them. Edges + /// connecting a selected node to a non-selected node are hidden. + + async fn subgraph( + &self, + #[graphql(desc = "Node ids to keep.")] nodes: Vec, + ) -> GqlGraph { let self_clone = self.clone(); blocking_compute(move || self_clone.apply(|g| g.subgraph(nodes.clone()))).await } - /// Returns a view of the graph that only includes valid edges. + /// View containing only valid edges — for persistent graphs this drops edges + /// whose most recent event is a deletion at the latest time of the current + /// view (a later re-addition would keep them). On event graphs this is a + /// no-op. async fn valid(&self) -> GqlGraph { self.apply(|g| g.valid()) } - /// Returns a subgraph filtered by the specified node types. - async fn subgraph_node_types(&self, node_types: Vec) -> GqlGraph { + /// View restricted to nodes with the given node types. + + async fn subgraph_node_types( + &self, + #[graphql(desc = "Node types to include.")] node_types: Vec, + ) -> GqlGraph { let self_clone = self.clone(); blocking_compute(move || self_clone.apply(|g| g.subgraph_node_types(node_types.clone()))) .await } - /// Returns a subgraph containing all nodes except the specified excluded nodes. - async fn exclude_nodes(&self, nodes: Vec) -> GqlGraph { + /// View with a set of nodes removed (along with any edges touching them). + + async fn exclude_nodes( + &self, + #[graphql(desc = "Node ids to exclude.")] nodes: Vec, + ) -> GqlGraph { let self_clone = self.clone(); blocking_compute(move || { let nodes: Vec = nodes.iter().map(|v| v.as_node_ref()).collect(); @@ -166,10 +213,20 @@ impl GqlGraph { /// e.g. "1 month and 1 day" will align at the start of the day. 
/// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." + )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." + )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -189,9 +246,16 @@ impl GqlGraph { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." 
+ )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -204,14 +268,23 @@ impl GqlGraph { } /// Return a graph containing only the activity between start and end, by default raphtory stores times in milliseconds from the unix epoch. - async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> GqlGraph { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> GqlGraph { let start = start.into_time(); let end = end.into_time(); self.apply(|g| g.window(start, end)) } /// Creates a view including all events at a specified time. - async fn at(&self, time: GqlTimeInput) -> GqlGraph { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> GqlGraph { let time = time.into_time(); self.apply(|g| g.at(time)) } @@ -223,7 +296,11 @@ impl GqlGraph { } /// Create a view including all events that are valid at the specified time. - async fn snapshot_at(&self, time: GqlTimeInput) -> GqlGraph { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> GqlGraph { let time = time.into_time(); self.apply(|g| g.snapshot_at(time)) } @@ -234,32 +311,62 @@ impl GqlGraph { } /// Create a view including all events before a specified end (exclusive). - async fn before(&self, time: GqlTimeInput) -> GqlGraph { + + async fn before( + &self, + #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput, + ) -> GqlGraph { let time = time.into_time(); self.apply(|g| g.before(time)) } /// Create a view including all events after a specified start (exclusive). - async fn after(&self, time: GqlTimeInput) -> GqlGraph { + + async fn after( + &self, + #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput, + ) -> GqlGraph { let time = time.into_time(); self.apply(|g| g.after(time)) } - /// Shrink both the start and end of the window. 
- async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + /// Shrink both the start and end of the window. The new bounds are taken as the + /// intersection with the current window; this never widens the view. + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if before the current start.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if after the current end.")] + end: GqlTimeInput, + ) -> Self { let start = start.into_time(); let end = end.into_time(); self.apply(|g| g.shrink_window(start, end)) } /// Set the start of the window to the larger of the specified value or current start. - async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql( + desc = "Proposed new start (TimeInput); has no effect if it would widen the window." + )] + start: GqlTimeInput, + ) -> Self { let start = start.into_time(); self.apply(|g| g.shrink_start(start)) } /// Set the end of the window to the smaller of the specified value or current end. - async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql( + desc = "Proposed new end (TimeInput); has no effect if it would widen the window." + )] + end: GqlTimeInput, + ) -> Self { let end = end.into_time(); self.apply(|g| g.shrink_end(end)) } @@ -268,7 +375,9 @@ impl GqlGraph { //// TIME QUERIES ////// //////////////////////// - /// Returns the timestamp for the creation of the graph. + /// Filesystem creation timestamp (epoch millis) of the graph's on-disk folder + /// — i.e. when this graph was first saved to the server, not when its earliest + /// event occurred. Use `earliestTime` for the latter. async fn created(&self) -> Result { Ok(self.path.created_async().await?) } @@ -305,8 +414,17 @@ impl GqlGraph { Ok(self.graph.end().into()) } - /// Returns the earliest time that any edge in this graph is valid. 
- async fn earliest_edge_time(&self, include_negative: Option) -> Result { + /// The earliest time at which any edge in this graph is valid. + /// + /// * `includeNegative` — if false, edge events with a timestamp `< 0` are + /// skipped when computing the minimum. Defaults to true. + async fn earliest_edge_time( + &self, + #[graphql( + desc = "If false, edge events with a timestamp `< 0` are skipped when computing the minimum. Defaults to true." + )] + include_negative: Option, + ) -> Result { let self_clone = self.clone(); Ok(blocking_compute(move || { let include_negative = include_negative.unwrap_or(true); @@ -322,8 +440,15 @@ impl GqlGraph { .await) } - /// Returns the latest time that any edge in this graph is valid. - async fn latest_edge_time(&self, include_negative: Option) -> Result { + /// The latest time at which any edge in this graph is valid. + + async fn latest_edge_time( + &self, + #[graphql( + desc = "If false, edge events with a timestamp `< 0` are skipped when computing the maximum. Defaults to true." + )] + include_negative: Option, + ) -> Result { let self_clone = self.clone(); Ok(blocking_compute(move || { let include_negative = include_negative.unwrap_or(true); @@ -370,13 +495,27 @@ impl GqlGraph { //// EXISTS CHECKERS /// //////////////////////// - /// Returns true if the graph contains the specified node. - async fn has_node(&self, name: String) -> Result { + /// Returns true if a node with the given id exists in this view. + + async fn has_node( + &self, + #[graphql(desc = "Node id to look up.")] name: GqlNodeId, + ) -> Result { Ok(self.graph.has_node(name)) } - /// Returns true if the graph contains the specified edge. Edges are specified by providing a source and destination node id. You can restrict the search to a specified layer. - async fn has_edge(&self, src: String, dst: String, layer: Option) -> Result { + /// Returns true if an edge exists between `src` and `dst` in this view, optionally + /// restricted to a single layer. 
+ + async fn has_edge( + &self, + #[graphql(desc = "Source node id.")] src: GqlNodeId, + #[graphql(desc = "Destination node id.")] dst: GqlNodeId, + #[graphql( + desc = "Optional; if provided, only checks whether the edge exists on this layer. If null or omitted, any layer counts." + )] + layer: Option, + ) -> Result { Ok(match layer { Some(name) => self .graph @@ -391,13 +530,22 @@ impl GqlGraph { //////// GETTERS /////// //////////////////////// - /// Gets the node with the specified id. - async fn node(&self, name: String) -> Result> { + /// Look up a single node by id. Returns null if the node doesn't exist in this + /// view. + + async fn node(&self, #[graphql(desc = "Node id.")] name: GqlNodeId) -> Result> { Ok(self.graph.node(name).map(|node| node.into())) } - /// Gets (optionally a subset of) the nodes in the graph. - async fn nodes(&self, select: Option) -> Result { + /// All nodes in this view, optionally narrowed by a filter. + + async fn nodes( + &self, + #[graphql( + desc = "Optional node filter (by name, property, type, etc.). If omitted, every node in the view is returned." + )] + select: Option, + ) -> Result { let nn = self.graph.nodes(); if let Some(sel) = select { @@ -413,13 +561,26 @@ impl GqlGraph { Ok(GqlNodes::new(nn)) } - /// Gets the edge with the specified source and destination nodes. - async fn edge(&self, src: String, dst: String) -> Result> { + /// Look up a single edge by its endpoint ids. Returns null if no edge exists + /// between `src` and `dst` in this view. + + async fn edge( + &self, + #[graphql(desc = "Source node id.")] src: GqlNodeId, + #[graphql(desc = "Destination node id.")] dst: GqlNodeId, + ) -> Result> { Ok(self.graph.edge(src, dst).map(|e| e.into())) } - /// Gets the edges in the graph. - async fn edges<'a>(&self, select: Option) -> Result { + /// All edges in this view, optionally narrowed by a filter. + + async fn edges<'a>( + &self, + #[graphql( + desc = "Optional edge filter (by property, layer, src/dst, etc.). 
If omitted, every edge in the view is returned." + )] + select: Option, + ) -> Result { let base = self.graph.edges_unlocked(); if let Some(sel) = select { @@ -477,11 +638,23 @@ impl GqlGraph { Ok(blocking_compute(move || GraphSchema::new(&self_clone.graph)).await) } + /// Access registered graph algorithms (PageRank, shortest path, etc.) for this + /// graph view. The set of available algorithms is defined by the plugin registry + /// loaded at server startup. async fn algorithms(&self) -> GraphAlgorithmPlugin { self.graph.clone().into() } - async fn shared_neighbours(&self, selected_nodes: Vec) -> Result> { + /// Nodes that are neighbours of every node in `selectedNodes`. Returns the + /// intersection of each selected node's neighbour set (undirected). + + async fn shared_neighbours( + &self, + #[graphql( + desc = "Node ids whose common neighbours you want. Returns an empty list if `selectedNodes` is empty or any id does not exist." + )] + selected_nodes: Vec, + ) -> Result> { let self_clone = self.clone(); Ok(blocking_compute(move || { if selected_nodes.is_empty() { @@ -512,8 +685,15 @@ impl GqlGraph { .await) } - /// Export all nodes and edges from this graph view to another existing graph - async fn export_to<'a>(&self, ctx: &Context<'a>, path: String) -> Result { + /// Copy all nodes and edges of the current graph view into another already- + /// existing graph stored on the server. The destination graph is preserved + /// — this only adds; it does not replace. + + async fn export_to<'a>( + &self, + ctx: &Context<'a>, + #[graphql(desc = "Destination graph path relative to the root namespace.")] path: String, + ) -> Result { let data = ctx.data_unchecked::(); let other_g = data.get_graph(path.as_ref()).await?.graph; let g = self.graph.clone(); @@ -525,7 +705,16 @@ impl GqlGraph { .await } - async fn filter(&self, expr: Option) -> Result { + /// Returns a filtered view of the graph. 
Applies a mixed node/edge filter + /// expression and narrows nodes, edges, and their properties to what matches. + + async fn filter( + &self, + #[graphql( + desc = "Optional composite filter combining node, edge, property, and metadata conditions. If omitted, applies the identity filter (equivalent to no filtering)." + )] + expr: Option, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: DynView = match expr { @@ -541,7 +730,14 @@ impl GqlGraph { .await } - async fn filter_nodes(&self, expr: GqlNodeFilter) -> Result { + /// Returns a graph view restricted to nodes that match the given filter; edges + /// are kept only if both endpoints survive. + + async fn filter_nodes( + &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] + expr: GqlNodeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeNodeFilter = expr.try_into()?; @@ -554,7 +750,14 @@ impl GqlGraph { .await } - async fn filter_edges(&self, expr: GqlEdgeFilter) -> Result { + /// Returns a graph view restricted to edges that match the given filter. Nodes + /// remain in the view even if all their edges are filtered out. + + async fn filter_edges( + &self, + #[graphql(desc = "Composite edge filter (by property, layer, src/dst, etc.).")] + expr: GqlEdgeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeEdgeFilter = expr.try_into()?; @@ -591,14 +794,16 @@ impl GqlGraph { } } - /// (Experimental) Searches for nodes which match the given filter expression. - /// - /// Uses Tantivy's exact search. + /// (Experimental) Searches for nodes which match the given filter + /// expression. Uses Tantivy's exact search; requires the graph to have + /// been indexed. 
+ async fn search_nodes( &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] filter: GqlNodeFilter, - limit: usize, - offset: usize, + #[graphql(desc = "Maximum number of nodes to return.")] limit: usize, + #[graphql(desc = "Number of matches to skip before returning results.")] offset: usize, ) -> Result> { #[cfg(feature = "search")] { @@ -617,14 +822,16 @@ impl GqlGraph { } } - /// (Experimental) Searches the index for edges which match the given filter expression. - /// - /// Uses Tantivy's exact search. + /// (Experimental) Searches the index for edges which match the given + /// filter expression. Uses Tantivy's exact search; requires the graph to + /// have been indexed. + async fn search_edges( &self, + #[graphql(desc = "Composite edge filter (by property, layer, src/dst, etc.).")] filter: GqlEdgeFilter, - limit: usize, - offset: usize, + #[graphql(desc = "Maximum number of edges to return.")] limit: usize, + #[graphql(desc = "Number of matches to skip before returning results.")] offset: usize, ) -> Result> { #[cfg(feature = "search")] { @@ -643,9 +850,17 @@ impl GqlGraph { } } - /// Returns the specified graph view or if none is specified returns the default view. - /// This allows you to specify multiple operations together. - async fn apply_views(&self, views: Vec) -> Result { + /// Apply a list of view operations in the given order and return the + /// resulting graph view. Lets callers compose multiple view transforms + /// (window, layer, filter, snapshot, ...) in a single call. + + async fn apply_views( + &self, + #[graphql( + desc = "Ordered list of view operations; each entry is a one-of variant applied to the running result." 
+ )] + views: Vec, + ) -> Result { let mut return_view: GqlGraph = GqlGraph::new(self.path.clone(), self.graph.clone()); for view in views { return_view = match view { diff --git a/raphtory-graphql/src/model/graph/history.rs b/raphtory-graphql/src/model/graph/history.rs index 7e66f5db17..fbb629da22 100644 --- a/raphtory-graphql/src/model/graph/history.rs +++ b/raphtory-graphql/src/model/graph/history.rs @@ -74,11 +74,16 @@ impl GqlHistory { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -101,11 +106,16 @@ impl GqlHistory { /// /// For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page_rev( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -149,7 +159,14 @@ impl GqlHistory { /// Useful for converting millisecond timestamps into easily readable datetime strings. /// Optionally, a format string can be passed to format the output. Defaults to RFC 3339 if not provided (e.g., "2023-12-25T10:30:45.123Z"). /// Refer to chrono::format::strftime for formatting specifiers and escape sequences. 
- async fn datetimes(&self, format_string: Option) -> GqlHistoryDateTime { + + async fn datetimes( + &self, + #[graphql( + desc = "Optional format string for the rendered datetime. Uses `%`-style specifiers — for example `%Y-%m-%d` for `2024-01-15`, `%Y-%m-%d %H:%M:%S` for `2024-01-15 10:30:00`, or `%H:%M` for `10:30`. Defaults to RFC 3339 (e.g. `2024-01-15T10:30:45.123+00:00`) when omitted." + )] + format_string: Option, + ) -> GqlHistoryDateTime { let self_clone = self.clone(); blocking_compute(move || GqlHistoryDateTime { history_dt: HistoryDateTime::new(self_clone.history.0.clone()), // clone the Arc, not the underlying object @@ -168,7 +185,10 @@ impl GqlHistory { .await } - /// Returns an Intervals object which calculates the intervals between consecutive EventTime timestamps. + /// Inter-event gap analysis for this history. The returned `Intervals` + /// object exposes each gap (in milliseconds) between consecutive events, + /// plus summary statistics — `min` / `max` / `mean` / `median` — and + /// paginated access via `list` / `listRev` / `page` / `pageRev`. async fn intervals(&self) -> GqlIntervals { let self_clone = self.clone(); blocking_compute(move || GqlIntervals { @@ -206,11 +226,16 @@ impl GqlHistoryTimestamp { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." 
+ )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -232,11 +257,16 @@ impl GqlHistoryTimestamp { /// /// For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page_rev( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -267,9 +297,13 @@ impl GqlHistoryDateTime { /// List all datetimes formatted as strings. /// If filter_broken is set to True, time conversion errors will be ignored. If set to False, a TimeError /// will be raised on time conversion error. Defaults to False. + async fn list( &self, ctx: &Context<'_>, + #[graphql( + desc = "If true, ignore unconvertible timestamps; if false, raise an error on the first conversion failure. Defaults to false." + )] filter_broken: Option, ) -> Result, Error> { check_list_allowed(ctx)?; @@ -303,9 +337,13 @@ impl GqlHistoryDateTime { /// List all datetimes formatted as strings in reverse chronological order. /// If filter_broken is set to True, time conversion errors will be ignored. If set to False, a TimeError /// will be raised on time conversion error. Defaults to False. + async fn list_rev( &self, ctx: &Context<'_>, + #[graphql( + desc = "If true, ignore unconvertible timestamps; if false, raise an error on the first conversion failure. Defaults to false." + )] filter_broken: Option, ) -> Result, Error> { check_list_allowed(ctx)?; @@ -343,12 +381,20 @@ impl GqlHistoryDateTime { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. 
+ async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, + #[graphql( + desc = "If true, skip timestamps whose conversion fails; if false, raise an error on the first conversion failure. Defaults to false." + )] filter_broken: Option, ) -> Result, Error> { check_page_limit(ctx, limit)?; @@ -390,12 +436,20 @@ impl GqlHistoryDateTime { /// /// For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page_rev( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, + #[graphql( + desc = "If true, skip timestamps whose conversion fails; if false, raise an error on the first conversion failure. Defaults to false." + )] filter_broken: Option, ) -> Result, Error> { check_page_limit(ctx, limit)?; @@ -473,11 +527,16 @@ impl GqlHistoryEventId { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." 
+ )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -500,11 +559,16 @@ impl GqlHistoryEventId { /// /// For example, if page_rev(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page_rev( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -551,11 +615,16 @@ impl GqlIntervals { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -577,11 +646,16 @@ impl GqlIntervals { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page_rev( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." 
+ )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; diff --git a/raphtory-graphql/src/model/graph/meta_graph.rs b/raphtory-graphql/src/model/graph/meta_graph.rs index 534183fde3..30aac5eff9 100644 --- a/raphtory-graphql/src/model/graph/meta_graph.rs +++ b/raphtory-graphql/src/model/graph/meta_graph.rs @@ -13,7 +13,10 @@ use raphtory::{ use std::{cmp::Ordering, sync::Arc}; use tokio::sync::OnceCell; -/// +/// Lightweight summary of a stored graph — its name, path, counts, and +/// filesystem timestamps — served without deserializing the full graph. +/// Useful for listing what's available on the server before committing to a +/// full load. #[derive(ResolvedObject, Clone)] pub(crate) struct MetaGraph { folder: ExistingGraphFolder, diff --git a/raphtory-graphql/src/model/graph/mod.rs b/raphtory-graphql/src/model/graph/mod.rs index 3464a89bcc..1791b75cfe 100644 --- a/raphtory-graphql/src/model/graph/mod.rs +++ b/raphtory-graphql/src/model/graph/mod.rs @@ -15,6 +15,7 @@ pub(crate) mod mutable_graph; pub(crate) mod namespace; pub(crate) mod namespaced_item; pub(crate) mod node; +pub(crate) mod node_id; mod nodes; mod path_from_node; pub(crate) mod property; diff --git a/raphtory-graphql/src/model/graph/mutable_graph.rs b/raphtory-graphql/src/model/graph/mutable_graph.rs index 3ac4600540..6c672a6fd3 100644 --- a/raphtory-graphql/src/model/graph/mutable_graph.rs +++ b/raphtory-graphql/src/model/graph/mutable_graph.rs @@ -1,16 +1,25 @@ use crate::{ graph::{GraphWithVectors, UpdateEmbeddings}, - model::graph::{edge::GqlEdge, graph::GqlGraph, node::GqlNode, property::Value}, + model::{ + graph::{ + edge::GqlEdge, graph::GqlGraph, node::GqlNode, node_id::GqlNodeId, property::Value, + timeindex::GqlTimeInput, + }, + GqlGraphType, + }, rayon::blocking_write, }; use dynamic_graphql::{InputObject, ResolvedObject, ResolvedObjectFields}; use itertools::Itertools; use raphtory::{ - db::graph::{edge::EdgeView, node::NodeView}, + db::{ + 
api::view::MaterializedGraph, + graph::{edge::EdgeView, node::NodeView}, + }, errors::GraphError, prelude::*, }; -use raphtory_api::core::storage::arc_str::OptionAsStr; +use raphtory_api::core::{storage::arc_str::OptionAsStr, utils::time::IntoTime}; use std::{ error::Error, fmt::{Debug, Display, Formatter}, @@ -77,16 +86,17 @@ pub struct GqlPropertyInput { #[derive(InputObject, Clone)] pub struct TemporalPropertyInput { - /// Time. - time: i64, + /// Time of the update — accepts the same forms as `TimeInput` (epoch + /// millis Int, RFC3339 string, or `{timestamp, eventId}` object). + time: GqlTimeInput, /// Properties. properties: Option>, } #[derive(InputObject, Clone)] pub struct NodeAddition { - /// Name. - name: String, + /// Node id (string or non-negative integer). + name: GqlNodeId, /// Node type. node_type: Option, /// Metadata. @@ -99,10 +109,10 @@ pub struct NodeAddition { #[derive(InputObject, Clone)] pub struct EdgeAddition { - /// Source node. - src: String, - /// Destination node. - dst: String, + /// Source node id (string or non-negative integer). + src: GqlNodeId, + /// Destination node id (string or non-negative integer). + dst: GqlNodeId, /// Layer. layer: Option, /// Metadata. @@ -111,6 +121,11 @@ pub struct EdgeAddition { updates: Option>, } +/// Write-enabled handle for a graph. Obtained by calling `updateGraph(path)` +/// on the root query with a path you have write permission for. Supports +/// adding nodes and edges (individually or in batches), attaching +/// properties/metadata, and looking up mutable `node`/`edge` handles. Use the +/// read-only `graph(path)` resolver for queries. #[derive(ResolvedObject, Clone)] #[graphql(name = "MutableGraph")] pub struct GqlMutableGraph { @@ -139,30 +154,61 @@ fn as_properties( #[ResolvedObjectFields] impl GqlMutableGraph { - /// Get the non-mutable graph. 
- async fn graph(&self) -> GqlGraph { - GqlGraph::new(self.graph.folder.clone(), self.graph.graph.clone()) + /// Read-only view of this graph — identical to what you'd get from + /// `graph(path:)` on the query root. Use this when you want to compose + /// queries on the graph you've just mutated. `graphType` lets you + /// re-interpret the graph at query time (see `graph(path:)` for + /// semantics); defaults to the stored graph's native type. + async fn graph( + &self, + #[graphql( + desc = "Optional override for graph semantics — `EVENT` treats every update as a point-in-time event, `PERSISTENT` carries values forward until overwritten or deleted. Defaults to the stored graph's native type." + )] + graph_type: Option, + ) -> GqlGraph { + let folder = self.graph.folder.clone(); + match graph_type { + Some(GqlGraphType::Event) => match self.graph.graph.clone() { + MaterializedGraph::EventGraph(g) => GqlGraph::new(folder, g), + MaterializedGraph::PersistentGraph(g) => GqlGraph::new(folder, g.event_graph()), + }, + Some(GqlGraphType::Persistent) => match self.graph.graph.clone() { + MaterializedGraph::EventGraph(g) => GqlGraph::new(folder, g.persistent_graph()), + MaterializedGraph::PersistentGraph(g) => GqlGraph::new(folder, g), + }, + None => GqlGraph::new(folder, self.graph.graph.clone()), + } } - /// Get mutable existing node. - async fn node(&self, name: String) -> Option { + /// Look up an existing node for mutation. Returns null if the node doesn't + /// exist; use `addNode` or `createNode` to create one. + + async fn node(&self, #[graphql(desc = "Node id.")] name: GqlNodeId) -> Option { self.graph.node(name).map(|n| GqlMutableNode::new(n)) } - /// Add a new node or add updates to an existing node. + /// Add a new node or append an update to an existing one. Upsert semantics: + /// no error if the node already exists — properties and type are merged. 
+ async fn add_node( &self, - time: i64, - name: String, - properties: Option>, + #[graphql(desc = "Time of the event.")] time: GqlTimeInput, + #[graphql(desc = "Node id.")] name: GqlNodeId, + #[graphql(desc = "Optional property updates attached to this event.")] properties: Option< + Vec, + >, + #[graphql( + desc = "Optional node type to assign. If provided, sets the node's type at this event." + )] node_type: Option, + #[graphql(desc = "Optional layer name. If omitted, the default layer is used.")] layer: Option, ) -> Result { let self_clone = self.clone(); let node = blocking_write(move || { let prop_iter = as_properties(properties.unwrap_or(vec![]))?; let node = self_clone.graph.add_node( - time, + time.into_time(), &name, prop_iter, node_type.as_str(), @@ -179,20 +225,28 @@ impl GqlMutableGraph { Ok(GqlMutableNode::new(node)) } - /// Create a new node or fail if it already exists. + /// Create a new node or fail if it already exists. Strict alternative to + /// `addNode` — use this when you want to detect collisions. + async fn create_node( &self, - time: i64, - name: String, - properties: Option>, + #[graphql(desc = "Time of the create event.")] time: GqlTimeInput, + #[graphql(desc = "Node id.")] name: GqlNodeId, + #[graphql(desc = "Optional property updates attached to this event.")] properties: Option< + Vec, + >, + #[graphql( + desc = "Optional node type to assign. If provided, sets the node's type at this event." + )] node_type: Option, + #[graphql(desc = "Optional layer name. If omitted, the default layer is used.")] layer: Option, ) -> Result { let self_clone = self.clone(); let node = blocking_write(move || { let prop_iter = as_properties(properties.unwrap_or(vec![]))?; let node = self_clone.graph.create_node( - time, + time.into_time(), &name, prop_iter, node_type.as_str(), @@ -209,8 +263,18 @@ impl GqlMutableGraph { Ok(GqlMutableNode::new(node)) } - /// Add a batch of nodes. 
- async fn add_nodes(&self, nodes: Vec) -> Result { + /// Batch-add multiple nodes in one call. For each `NodeAddition`, applies every + /// update it carries (time/properties pairs), then optionally sets its node type + /// and adds any metadata. On partial failure, returns a `BatchFailures` error + /// describing which entries failed and why; otherwise returns true. + + async fn add_nodes( + &self, + #[graphql( + desc = "List of `NodeAddition` inputs, each specifying a node's name, optional type, layer, per-timestamp updates, and metadata." + )] + nodes: Vec, + ) -> Result { let self_clone = self.clone(); let (succeeded, batch_failures) = blocking_write(move || { @@ -218,15 +282,19 @@ impl GqlMutableGraph { .iter() .map(|node| { let node = node.clone(); - let name = node.name.as_str(); + let name = &node.name; let node_type = node.node_type.as_str(); let layer = node.layer.as_str(); for prop in node.updates.unwrap_or(vec![]) { let prop_iter = as_properties(prop.properties.unwrap_or(vec![]))?; - self_clone - .graph - .add_node(prop.time, name, prop_iter, node_type, layer)?; + self_clone.graph.add_node( + prop.time.into_time(), + name, + prop_iter, + node_type, + layer, + )?; } if let Some(node_type) = node.node_type.as_str() { self_clone.get_node_view(name)?.set_node_type(node_type)?; @@ -255,26 +323,38 @@ impl GqlMutableGraph { } } - /// Get a mutable existing edge. - async fn edge(&self, src: String, dst: String) -> Option { + /// Look up an existing edge for mutation. Returns null if no such edge exists. + + async fn edge( + &self, + #[graphql(desc = "Source node id.")] src: GqlNodeId, + #[graphql(desc = "Destination node id.")] dst: GqlNodeId, + ) -> Option { self.graph.edge(src, dst).map(|e| GqlMutableEdge::new(e)) } - /// Add a new edge or add updates to an existing edge. + /// Add a new edge or append an update to an existing one. Upsert semantics: + /// safe to call on an edge that already exists — creates missing endpoints if + /// needed. 
+ async fn add_edge( &self, - time: i64, - src: String, - dst: String, - properties: Option>, + #[graphql(desc = "Time of the event.")] time: GqlTimeInput, + #[graphql(desc = "Source node id.")] src: GqlNodeId, + #[graphql(desc = "Destination node id.")] dst: GqlNodeId, + #[graphql(desc = "Optional property updates attached to this event.")] properties: Option< + Vec, + >, + #[graphql(desc = "Optional layer name. If omitted, the default layer is used.")] layer: Option, ) -> Result { let self_clone = self.clone(); let edge = blocking_write(move || { let prop_iter = as_properties(properties.unwrap_or(vec![]))?; - let edge = self_clone - .graph - .add_edge(time, src, dst, prop_iter, layer.as_str())?; + let edge = + self_clone + .graph + .add_edge(time.into_time(), src, dst, prop_iter, layer.as_str())?; Ok::<_, GraphError>(edge) }) @@ -286,28 +366,42 @@ impl GqlMutableGraph { Ok(GqlMutableEdge::new(edge)) } - /// Add a batch of edges. - async fn add_edges(&self, edges: Vec) -> Result { + /// Batch-add multiple edges in one call. For each `EdgeAddition`, applies every + /// update it carries, then adds any metadata. On partial failure, returns a + /// `BatchFailures` error describing which entries failed; otherwise returns + /// true. + + async fn add_edges( + &self, + #[graphql( + desc = "List of `EdgeAddition` inputs, each specifying an edge's `src`, `dst`, optional layer, per-timestamp updates, and metadata." 
+ )] + edges: Vec, + ) -> Result { let self_clone = self.clone(); let (edge_pairs, failures) = blocking_write(move || { let edge_res: Vec<_> = edges .into_iter() .map(|edge| { - let src = edge.src.as_str(); - let dst = edge.dst.as_str(); + let src = &edge.src; + let dst = &edge.dst; let layer = edge.layer.as_str(); for prop in edge.updates.unwrap_or(vec![]) { let prop_iter = as_properties(prop.properties.unwrap_or(vec![]))?; - self_clone - .graph - .add_edge(prop.time, src, dst, prop_iter, layer)?; + self_clone.graph.add_edge( + prop.time.into_time(), + src, + dst, + prop_iter, + layer, + )?; } let metadata = edge.metadata.unwrap_or(vec![]); if !metadata.is_empty() { let prop_iter = as_properties(metadata)?; self_clone - .get_edge_view(src.to_string(), dst.to_string())? + .get_edge_view(src, dst)? .add_metadata(prop_iter, layer)?; } Ok((edge.src, edge.dst)) @@ -327,19 +421,24 @@ impl GqlMutableGraph { } } - /// Mark an edge as deleted (creates the edge if it did not exist). + /// Mark an edge as deleted at the given time. Persistent graphs treat this + /// as a tombstone (the edge becomes invalid from `time` onwards); event + /// graphs simply log the deletion event. Creates the edge first if it did + /// not exist. + async fn delete_edge( &self, - time: i64, - src: String, - dst: String, + #[graphql(desc = "Time of the deletion.")] time: GqlTimeInput, + #[graphql(desc = "Source node id.")] src: GqlNodeId, + #[graphql(desc = "Destination node id.")] dst: GqlNodeId, + #[graphql(desc = "Optional layer name. If omitted, the default layer is used.")] layer: Option, ) -> Result { let self_clone = self.clone(); let edge = blocking_write(move || { let edge = self_clone .graph - .delete_edge(time, src, dst, layer.as_str())?; + .delete_edge(time.into_time(), src, dst, layer.as_str())?; Ok::<_, GraphError>(edge) }) @@ -351,17 +450,19 @@ impl GqlMutableGraph { Ok(GqlMutableEdge::new(edge)) } - /// Add temporal properties to graph. 
+ /// Add temporal properties to the graph itself (not a node or edge). Each + /// call records a property update at `t`. + async fn add_properties( &self, - t: i64, - properties: Vec, + #[graphql(desc = "Time of the update.")] t: GqlTimeInput, + #[graphql(desc = "List of `{key, value}` pairs to set.")] properties: Vec, ) -> Result { let self_clone = self.clone(); let result = blocking_write(move || { self_clone .graph - .add_properties(t, as_properties(properties)?)?; + .add_properties(t.into_time(), as_properties(properties)?)?; Ok(true) }) .await; @@ -371,8 +472,15 @@ impl GqlMutableGraph { result } - /// Add metadata to graph (errors if the property already exists). - async fn add_metadata(&self, properties: Vec) -> Result { + /// Add metadata to the graph itself. Errors if any of the keys already + /// exists — use `updateMetadata` to overwrite. + + async fn add_metadata( + &self, + #[graphql(desc = "List of `{key, value}` pairs to set as metadata.")] properties: Vec< + GqlPropertyInput, + >, + ) -> Result { let self_clone = self.clone(); let result = blocking_write(move || { self_clone.graph.add_metadata(as_properties(properties)?)?; @@ -384,8 +492,15 @@ impl GqlMutableGraph { result } - /// Update metadata of the graph (overwrites existing values). - async fn update_metadata(&self, properties: Vec) -> Result { + /// Update metadata of the graph itself, overwriting any existing values for + /// the given keys. 
+ + async fn update_metadata( + &self, + #[graphql(desc = "List of `{key, value}` pairs to upsert.")] properties: Vec< + GqlPropertyInput, + >, + ) -> Result { let self_clone = self.clone(); let result = blocking_write(move || { self_clone @@ -402,22 +517,25 @@ impl GqlMutableGraph { } impl GqlMutableGraph { - fn get_node_view(&self, name: &str) -> Result, GraphError> { + fn get_node_view( + &self, + name: &GqlNodeId, + ) -> Result, GraphError> { self.graph .node(name) - .ok_or_else(|| GraphError::NodeMissingError(GID::Str(name.to_owned()))) + .ok_or_else(|| GraphError::NodeMissingError(name.0.clone())) } fn get_edge_view( &self, - src: String, - dst: String, + src: &GqlNodeId, + dst: &GqlNodeId, ) -> Result, GraphError> { self.graph - .edge(src.clone(), dst.clone()) - .ok_or(GraphError::EdgeMissingError { - src: GID::Str(src), - dst: GID::Str(dst), + .edge(src, dst) + .ok_or_else(|| GraphError::EdgeMissingError { + src: src.0.clone(), + dst: dst.0.clone(), }) } @@ -427,6 +545,9 @@ impl GqlMutableGraph { } } +/// Write-side handle for a single node — returned from `addNode`, `createNode`, +/// or `MutableGraph.node`. Supports adding updates, setting node type, and +/// attaching or updating metadata. #[derive(ResolvedObject, Clone)] #[graphql(name = "MutableNode")] pub struct GqlMutableNode { @@ -451,8 +572,15 @@ impl GqlMutableNode { self.node.clone().into() } - /// Add metadata to the node (errors if the property already exists). - async fn add_metadata(&self, properties: Vec) -> Result { + /// Add metadata to this node. Errors if any of the keys already exists — + /// use `updateMetadata` to overwrite. 
+ + async fn add_metadata( + &self, + #[graphql(desc = "List of `{key, value}` pairs to set as metadata.")] properties: Vec< + GqlPropertyInput, + >, + ) -> Result { let self_clone = self.clone(); blocking_write(move || { self_clone.node.add_metadata(as_properties(properties)?)?; @@ -465,8 +593,13 @@ impl GqlMutableNode { Ok(true) } - /// Set the node type (errors if the node already has a non-default type). - async fn set_node_type(&self, new_type: String) -> Result { + /// Set this node's type. Errors if the node already has a non-default + /// type and you're trying to change it. + + async fn set_node_type( + &self, + #[graphql(desc = "Node-type name to assign.")] new_type: String, + ) -> Result { let self_clone = self.clone(); blocking_write(move || { self_clone.node.set_node_type(&new_type)?; @@ -479,8 +612,15 @@ impl GqlMutableNode { Ok(true) } - /// Update metadata of the node (overwrites existing property values). - async fn update_metadata(&self, properties: Vec) -> Result { + /// Update metadata of this node, overwriting any existing values for the + /// given keys. + + async fn update_metadata( + &self, + #[graphql(desc = "List of `{key, value}` pairs to upsert.")] properties: Vec< + GqlPropertyInput, + >, + ) -> Result { let self_clone = self.clone(); blocking_write(move || { self_clone @@ -496,17 +636,20 @@ impl GqlMutableNode { Ok(true) } - /// Add temporal property updates to the node. + /// Append a property update to this node at a specific time. + async fn add_updates( &self, - time: i64, + #[graphql(desc = "Time of the update.")] time: GqlTimeInput, + #[graphql(desc = "Optional `{key, value}` pairs attached to the event.")] properties: Option>, + #[graphql(desc = "Optional layer name. 
If omitted, the default layer is used.")] layer: Option, ) -> Result { let self_clone = self.clone(); blocking_write(move || { self_clone.node.add_updates( - time, + time.into_time(), as_properties(properties.unwrap_or(vec![]))?, layer.as_str(), )?; @@ -528,6 +671,9 @@ impl GqlMutableNode { } } +/// Write-side handle for a single edge — returned from `addEdge` or +/// `MutableGraph.edge`. Supports adding updates, deletions, and attaching +/// or updating metadata. #[derive(ResolvedObject, Clone)] #[graphql(name = "MutableEdge")] pub struct GqlMutableEdge { @@ -562,11 +708,21 @@ impl GqlMutableEdge { GqlMutableNode::new(self.edge.dst()) } - /// Mark the edge as deleted at time time. - async fn delete(&self, time: i64, layer: Option) -> Result { + /// Mark this edge as deleted at the given time. Persistent graphs treat this + /// as a tombstone (the edge becomes invalid from `time` onwards); event + /// graphs simply log the deletion event. + + async fn delete( + &self, + #[graphql(desc = "Time of the deletion.")] time: GqlTimeInput, + #[graphql( + desc = "Optional layer name. If omitted, uses the layer the edge was originally added on (when called after `addEdge`)." + )] + layer: Option, + ) -> Result { let self_clone = self.clone(); blocking_write(move || { - self_clone.edge.delete(time, layer.as_str())?; + self_clone.edge.delete(time.into_time(), layer.as_str())?; Ok::<_, GraphError>(()) }) .await?; @@ -577,14 +733,18 @@ impl GqlMutableEdge { Ok(true) } - /// Add metadata to the edge (errors if the value already exists). - /// - /// If this is called after add_edge, the layer is inherited from the add_edge and does not - /// need to be specified again. + /// Add metadata to this edge. Errors if any of the keys already exists — + /// use `updateMetadata` to overwrite. If this is called after `addEdge`, + /// the layer is inherited and does not need to be specified again. 
+ async fn add_metadata( &self, - properties: Vec, - layer: Option, + #[graphql(desc = "List of `{key, value}` pairs to set as metadata.")] properties: Vec< + GqlPropertyInput, + >, + #[graphql(desc = "Optional layer name; defaults to the inherited layer.")] layer: Option< + String, + >, ) -> Result { let self_clone = self.clone(); blocking_write(move || { @@ -602,14 +762,18 @@ impl GqlMutableEdge { Ok(true) } - /// Update metadata of the edge (existing values are overwritten). - /// - /// If this is called after add_edge, the layer is inherited from the add_edge and does not - /// need to be specified again. + /// Update metadata of this edge, overwriting any existing values for the + /// given keys. If this is called after `addEdge`, the layer is inherited + /// and does not need to be specified again. + async fn update_metadata( &self, - properties: Vec, - layer: Option, + #[graphql(desc = "List of `{key, value}` pairs to upsert.")] properties: Vec< + GqlPropertyInput, + >, + #[graphql(desc = "Optional layer name; defaults to the inherited layer.")] layer: Option< + String, + >, ) -> Result { let self_clone = self.clone(); blocking_write(move || { @@ -627,20 +791,23 @@ impl GqlMutableEdge { Ok(true) } - /// Add temporal property updates to the edge. - /// - /// If this is called after add_edge, the layer is inherited from the add_edge and does not - /// need to be specified again. + /// Append a property update to this edge at a specific time. If called + /// after `addEdge`, the layer is inherited and does not need to be + /// specified again. 
+ async fn add_updates( &self, - time: i64, + #[graphql(desc = "Time of the update.")] time: GqlTimeInput, + #[graphql(desc = "Optional `{key, value}` pairs attached to the event.")] properties: Option>, - layer: Option, + #[graphql(desc = "Optional layer name; defaults to the inherited layer.")] layer: Option< + String, + >, ) -> Result { let self_clone = self.clone(); blocking_write(move || { self_clone.edge.add_updates( - time, + time.into_time(), as_properties(properties.unwrap_or(vec![]))?, layer.as_str(), )?; diff --git a/raphtory-graphql/src/model/graph/namespace.rs b/raphtory-graphql/src/model/graph/namespace.rs index e80f09f4b3..cdf13fb9bf 100644 --- a/raphtory-graphql/src/model/graph/namespace.rs +++ b/raphtory-graphql/src/model/graph/namespace.rs @@ -13,6 +13,10 @@ use itertools::Itertools; use std::{path::PathBuf, sync::Arc}; use walkdir::WalkDir; +/// A directory-like container for graphs and nested namespaces. Graphs are +/// addressed by path (e.g. `"team/project/graph"`), and every segment except +/// the last is a namespace. Use to browse what's stored on the server without +/// loading any graph data. #[derive(ResolvedObject, Clone, Ord, Eq, PartialEq, PartialOrd)] pub(crate) struct Namespace { current_dir: PathBuf, // always validated @@ -159,6 +163,9 @@ fn is_namespace_visible( #[ResolvedObjectFields] impl Namespace { + /// Graphs directly inside this namespace (excludes graphs in nested + /// namespaces). Filtered by the caller's permissions — only graphs the + /// caller is allowed to see are returned. async fn graphs(&self, ctx: &Context<'_>) -> GqlCollection { let data = ctx.data_unchecked::(); let self_clone = self.clone(); @@ -178,10 +185,13 @@ impl Namespace { .collect(), ) } + /// Path of this namespace relative to the root namespace. Empty string for + /// the root namespace itself. async fn path(&self) -> String { self.relative_path.clone() } + /// Parent namespace, or null at the root. 
async fn parent(&self) -> Option { if self.relative_path.is_empty() { None @@ -198,6 +208,8 @@ impl Namespace { } } + /// Sub-namespaces directly inside this one (one level down, not recursive). + /// Filtered by permissions. async fn children(&self, ctx: &Context<'_>) -> GqlCollection { let data = ctx.data_unchecked::(); let self_clone = self.clone(); @@ -218,8 +230,9 @@ impl Namespace { ) } - // Fetch the collection of namespaces/graphs in this namespace. - // Namespaces will be listed before graphs. + /// Everything in this namespace — sub-namespaces and graphs — as a single + /// heterogeneous collection. Sub-namespaces are listed before graphs. + /// Filtered by permissions. async fn items(&self, ctx: &Context<'_>) -> GqlCollection { let data = ctx.data_unchecked::(); let self_clone = self.clone(); diff --git a/raphtory-graphql/src/model/graph/node.rs b/raphtory-graphql/src/model/graph/node.rs index c21e19c54f..7b429480f5 100644 --- a/raphtory-graphql/src/model/graph/node.rs +++ b/raphtory-graphql/src/model/graph/node.rs @@ -3,6 +3,7 @@ use crate::{ edges::GqlEdges, filtering::{GqlEdgeFilter, GqlNodeFilter, NodeViewCollection}, history::GqlHistory, + node_id::GqlNodeId, nodes::GqlNodes, path_from_node::GqlPathFromNode, property::{GqlMetadata, GqlProperties}, @@ -53,9 +54,10 @@ impl From> for GqlNode /// /// Collections can be filtered and used to create lists. impl GqlNode { - /// Returns the unique id of the node. - async fn id(&self) -> String { - self.vv.id().to_string() + /// Returns the unique id of the node — `String` for string-indexed + /// graphs, non-negative `Int` for integer-indexed graphs. + async fn id(&self) -> GqlNodeId { + GqlNodeId(self.vv.id()) } /// Returns the name of the node. @@ -73,24 +75,37 @@ impl GqlNode { } /// Return a view of node containing all layers specified. 
- async fn layers(&self, names: Vec) -> GqlNode { + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> GqlNode { let self_clone = self.clone(); blocking_compute(move || self_clone.vv.valid_layers(names).into()).await } /// Returns a collection containing nodes belonging to all layers except the excluded list of layers. - async fn exclude_layers(&self, names: Vec) -> GqlNode { + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> GqlNode { let self_clone = self.clone(); blocking_compute(move || self_clone.vv.exclude_valid_layers(names).into()).await } /// Returns a collection containing nodes belonging to the specified layer. - async fn layer(&self, name: String) -> GqlNode { + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> GqlNode { self.vv.valid_layers(name).into() } /// Returns a collection containing nodes belonging to all layers except the excluded layer. - async fn exclude_layer(&self, name: String) -> GqlNode { + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> GqlNode { self.vv.exclude_valid_layers(name).into() } @@ -103,10 +118,20 @@ impl GqlNode { /// e.g. "1 month and 1 day" will align at the start of the day. /// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." + )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. 
Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." + )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -126,9 +151,16 @@ impl GqlNode { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." + )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -141,12 +173,21 @@ impl GqlNode { } /// Create a view of the node including all events between the specified start (inclusive) and end (exclusive). - async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> GqlNode { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> GqlNode { self.vv.window(start.into_time(), end.into_time()).into() } /// Create a view of the node including all events at a specified time. 
- async fn at(&self, time: GqlTimeInput) -> GqlNode { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> GqlNode { self.vv.at(time.into_time()).into() } @@ -157,7 +198,11 @@ impl GqlNode { } /// Create a view of the node including all events that are valid at the specified time. - async fn snapshot_at(&self, time: GqlTimeInput) -> GqlNode { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> GqlNode { self.vv.snapshot_at(time.into_time()).into() } @@ -168,29 +213,54 @@ impl GqlNode { } /// Create a view of the node including all events before specified end time (exclusive). - async fn before(&self, time: GqlTimeInput) -> GqlNode { + + async fn before( + &self, + #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput, + ) -> GqlNode { self.vv.before(time.into_time()).into() } /// Create a view of the node including all events after the specified start time (exclusive). - async fn after(&self, time: GqlTimeInput) -> GqlNode { + + async fn after( + &self, + #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput, + ) -> GqlNode { self.vv.after(time.into_time()).into() } /// Shrink a Window to a specified start and end time, if these are earlier and later than the current start and end respectively. - async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.vv .shrink_window(start.into_time(), end.into_time()) .into() } /// Set the start of the window to the larger of a specified start time and self.start(). 
- async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + ) -> Self { self.vv.shrink_start(start.into_time()).into() } /// Set the end of the window to the smaller of a specified end and self.end(). - async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.vv.shrink_end(end.into_time()).into() } diff --git a/raphtory-graphql/src/model/graph/node_id.rs b/raphtory-graphql/src/model/graph/node_id.rs new file mode 100644 index 0000000000..a23c4faa2f --- /dev/null +++ b/raphtory-graphql/src/model/graph/node_id.rs @@ -0,0 +1,58 @@ +use async_graphql::{Error, Value as GqlValue}; +use dynamic_graphql::{Scalar, ScalarValue}; +use raphtory::core::entities::nodes::node_ref::{AsNodeRef, NodeRef}; +use raphtory_api::core::entities::GID; +use serde::{Deserialize, Serialize}; +use serde_json::Number; + +/// Identifier for a node — either a string (`"alice"`) or a non-negative +/// integer (`42`). Use whichever form matches how the graph was indexed +/// when nodes were added. 
+#[derive(Scalar, Clone, Debug, Serialize, Deserialize)] +#[graphql(name = "NodeId")] +pub struct GqlNodeId(pub GID); + +impl ScalarValue for GqlNodeId { + fn from_value(value: GqlValue) -> Result { + match value { + GqlValue::String(s) => Ok(GqlNodeId(GID::Str(s))), + GqlValue::Number(n) => n + .as_u64() + .map(|u| GqlNodeId(GID::U64(u))) + .ok_or_else(|| Error::new("NodeId integer must be a non-negative Int.")), + _ => Err(Error::new( + "Expected NodeId as a String or non-negative Int.", + )), + } + } + + fn to_value(&self) -> GqlValue { + match &self.0 { + GID::Str(s) => GqlValue::String(s.clone()), + GID::U64(u) => GqlValue::Number(Number::from(*u)), + } + } +} + +impl From for GID { + fn from(value: GqlNodeId) -> GID { + value.0 + } +} + +impl AsNodeRef for GqlNodeId { + fn as_node_ref(&self) -> NodeRef<'_> { + self.0.as_node_ref() + } +} + +impl GqlNodeId { + /// Returns the id as a `String`. Integer ids are formatted as decimal. + /// Useful for callers that need a string id. + pub fn to_string(&self) -> String { + match &self.0 { + GID::Str(s) => s.clone(), + GID::U64(u) => u.to_string(), + } + } +} diff --git a/raphtory-graphql/src/model/graph/nodes.rs b/raphtory-graphql/src/model/graph/nodes.rs index 99c6021f0b..515f6a0c12 100644 --- a/raphtory-graphql/src/model/graph/nodes.rs +++ b/raphtory-graphql/src/model/graph/nodes.rs @@ -33,6 +33,9 @@ use raphtory::{ use raphtory_api::core::{entities::VID, utils::time::IntoTime}; use std::cmp::Ordering; +/// A lazy collection of nodes from a graph view. Supports all the same view +/// transforms as `Graph` (window, layer, filter, ...) plus pagination and +/// sorting. Iterated via `list` / `page` / `ids` / `count`. #[derive(ResolvedObject, Clone)] #[graphql(name = "Nodes")] pub(crate) struct GqlNodes { @@ -70,23 +73,36 @@ impl GqlNodes { } /// Return a view of the nodes containing all layers specified. 
- async fn layers(&self, names: Vec) -> Self { + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> Self { self.update(self.nn.valid_layers(names)) } /// Return a view of the nodes containing all layers except those specified. - async fn exclude_layers(&self, names: Vec) -> Self { + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.nn.exclude_valid_layers(names))).await } /// Return a view of the nodes containing the specified layer. - async fn layer(&self, name: String) -> Self { + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> Self { self.update(self.nn.valid_layers(name)) } /// Return a view of the nodes containing all layers except those specified. - async fn exclude_layer(&self, name: String) -> Self { + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> Self { self.update(self.nn.exclude_valid_layers(name)) } @@ -99,10 +115,20 @@ impl GqlNodes { /// e.g. "1 month and 1 day" will align at the start of the day. /// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." + )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." 
+ )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -122,9 +148,16 @@ impl GqlNodes { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." + )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -137,12 +170,21 @@ impl GqlNodes { } /// Create a view of the node including all events between the specified start (inclusive) and end (exclusive). - async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> Self { self.update(self.nn.window(start.into_time(), end.into_time())) } /// Create a view of the nodes including all events at a specified time. 
- async fn at(&self, time: GqlTimeInput) -> Self { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> Self { self.update(self.nn.at(time.into_time())) } @@ -153,7 +195,11 @@ impl GqlNodes { } /// Create a view of the nodes including all events that are valid at the specified time. - async fn snapshot_at(&self, time: GqlTimeInput) -> Self { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> Self { self.update(self.nn.snapshot_at(time.into_time())) } @@ -164,37 +210,70 @@ impl GqlNodes { } /// Create a view of the nodes including all events before specified end time (exclusive). - async fn before(&self, time: GqlTimeInput) -> Self { + + async fn before(&self, #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput) -> Self { self.update(self.nn.before(time.into_time())) } /// Create a view of the nodes including all events after the specified start time (exclusive). - async fn after(&self, time: GqlTimeInput) -> Self { + + async fn after(&self, #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput) -> Self { self.update(self.nn.after(time.into_time())) } /// Shrink both the start and end of the window. - async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_window(start.into_time(), end.into_time())) } /// Set the start of the window to the larger of a specified start time and self.start(). 
- async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_start(start.into_time())) } /// Set the end of the window to the smaller of a specified end and self.end(). - async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_end(end.into_time())) } /// Filter nodes by node type. - async fn type_filter(&self, node_types: Vec) -> Self { + + async fn type_filter( + &self, + #[graphql(desc = "Node-type names to keep.")] node_types: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.nn.type_filter(&node_types))).await } - async fn apply_views(&self, views: Vec) -> Result { + /// Apply a list of views in the given order and return the resulting nodes + /// collection. Lets callers compose window, layer, filter, and snapshot + /// operations in a single call. + + async fn apply_views( + &self, + #[graphql( + desc = "Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, etc.) applied to the running result." + )] + views: Vec, + ) -> Result { let mut return_view: GqlNodes = GqlNodes::new(self.nn.clone()); for view in views { return_view = match view { @@ -250,7 +329,16 @@ impl GqlNodes { //// Sorting //// ///////////////// - async fn sorted(&self, sort_bys: Vec) -> Self { + /// Sort the nodes. Multiple criteria are applied lexicographically (ties on the + /// first key break to the second, etc.). + + async fn sorted( + &self, + #[graphql( + desc = "Ordered list of sort keys. Each entry chooses exactly one of `id` / `time` / `property`, with an optional `reverse: true` to flip order." 
+ )] + sort_bys: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || { let sorted: Index = self_clone @@ -320,6 +408,7 @@ impl GqlNodes { //// List /////// ///////////////// + /// Number of nodes in the current view. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.nn.len()).await @@ -330,11 +419,16 @@ impl GqlNodes { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> Result> { check_page_limit(ctx, limit)?; @@ -346,20 +440,45 @@ impl GqlNodes { .await) } + /// Materialise every node in the view. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); Ok(blocking_compute(move || self_clone.iter().collect()).await) } - /// Returns a view of the node ids. - async fn ids(&self) -> Vec { + /// Every node's id (name) as a flat list of strings. Rejected by the server when + /// bulk list endpoints are disabled. 
+ async fn ids(&self, ctx: &Context<'_>) -> Result> { + check_list_allowed(ctx)?; let self_clone = self.clone(); - blocking_compute(move || self_clone.nn.iter_unlocked().map(|nn| nn.name()).collect()).await + Ok( + blocking_compute(move || self_clone.nn.iter_unlocked().map(|nn| nn.name()).collect()) + .await, + ) } - /// Returns a filtered view that applies to list down the chain - async fn filter(&self, expr: GqlNodeFilter) -> Result { + /// Narrow the collection to nodes matching `expr`. The filter sticks to the + /// returned view — every subsequent traversal through these nodes (their + /// neighbours, edges, properties) continues to see the filtered scope. + /// + /// Useful when you want one scoping rule to apply across the whole query. + /// E.g. restricting everything to a specific week: + /// + /// ```text + /// nodes { filter(expr: {window: {start: 1234, end: 5678}}) { + /// list { neighbours { list { name } } } # neighbours still windowed + /// } } + /// ``` + /// + /// Contrast with `select`, which applies here and is not carried through. + + async fn filter( + &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] + expr: GqlNodeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeNodeFilter = expr.try_into()?; @@ -369,8 +488,30 @@ impl GqlNodes { .await } - /// Returns filtered list of nodes - async fn select(&self, expr: GqlNodeFilter) -> Result { + /// Narrow the collection to nodes matching `expr`, but only at this step — + /// subsequent traversals out of these nodes see the unfiltered graph again. + /// + /// Useful when you want different scopes at different hops. E.g. 
nodes + /// active on Monday, then their neighbours active on Tuesday, then *those* + /// neighbours active on Wednesday: + /// + /// ```text + /// nodes { select(expr: {window: {...monday...}}) { + /// list { neighbours { select(expr: {window: {...tuesday...}}) { + /// list { neighbours { select(expr: {window: {...wednesday...}}) { + /// list { name } + /// } } } + /// } } } + /// } } + /// ``` + /// + /// Contrast with `filter`, which persists the scope through subsequent ops. + + async fn select( + &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] + expr: GqlNodeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeNodeFilter = expr.try_into()?; diff --git a/raphtory-graphql/src/model/graph/path_from_node.rs b/raphtory-graphql/src/model/graph/path_from_node.rs index 83a193f83f..4a41c4a573 100644 --- a/raphtory-graphql/src/model/graph/path_from_node.rs +++ b/raphtory-graphql/src/model/graph/path_from_node.rs @@ -22,6 +22,10 @@ use raphtory::{ }; use raphtory_api::core::utils::time::IntoTime; +/// A collection of nodes anchored to a source node — the result of traversals +/// like `node.neighbours`, `inNeighbours`, or `outNeighbours`. Supports all +/// the usual view transforms (window, layer, filter, ...) and can be chained +/// to walk further hops. #[derive(ResolvedObject, Clone)] #[graphql(name = "PathFromNode")] pub(crate) struct GqlPathFromNode { @@ -52,24 +56,37 @@ impl GqlPathFromNode { //////////////////////// /// Returns a view of PathFromNode containing the specified layer, errors if the layer does not exist. 
- async fn layers(&self, names: Vec) -> Self { + + async fn layers( + &self, + #[graphql(desc = "Layer names to include.")] names: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.nn.valid_layers(names))).await } /// Return a view of PathFromNode containing all layers except the specified excluded layers, errors if any of the layers do not exist. - async fn exclude_layers(&self, names: Vec) -> Self { + + async fn exclude_layers( + &self, + #[graphql(desc = "Layer names to exclude.")] names: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.nn.exclude_valid_layers(names))).await } /// Return a view of PathFromNode containing the layer specified layer, errors if the layer does not exist. - async fn layer(&self, name: String) -> Self { + + async fn layer(&self, #[graphql(desc = "Layer name to include.")] name: String) -> Self { self.update(self.nn.valid_layers(name)) } /// Return a view of PathFromNode containing all layers except the specified excluded layers, errors if any of the layers do not exist. - async fn exclude_layer(&self, name: String) -> Self { + + async fn exclude_layer( + &self, + #[graphql(desc = "Layer name to exclude.")] name: String, + ) -> Self { self.update(self.nn.exclude_valid_layers(name)) } @@ -82,10 +99,20 @@ impl GqlPathFromNode { /// e.g. "1 month and 1 day" will align at the start of the day. /// Note that passing a step larger than window while alignment_unit is not "Unaligned" may lead to some entries appearing before /// the start of the first window and/or after the end of the last window (i.e. not included in any window). + async fn rolling( &self, + #[graphql( + desc = "Width of each window. Pass either `{epoch: }` for a discrete number of milliseconds (e.g. `{epoch: 1000}` for 1 second), or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}` or `{duration: 2 hours and 30 minutes}`)." 
+ )] window: WindowDuration, + #[graphql( + desc = "Optional gap between the start of one window and the start of the next. Accepts the same `{epoch: }` or `{duration: }` values as `window`. Defaults to `window` — i.e. windows touch end-to-end with no overlap and no gap." + )] step: Option, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step` (or `window` if no step is set)." + )] alignment_unit: Option, ) -> Result { let window = window.try_into_interval()?; @@ -105,9 +132,16 @@ impl GqlPathFromNode { /// alignment_unit optionally aligns the windows to the specified unit. "Unaligned" can be passed for no alignment. /// If unspecified (i.e. by default), alignment is done on the smallest unit of time in the step. /// e.g. "1 month and 1 day" will align at the start of the day. + async fn expanding( &self, + #[graphql( + desc = "How much the window grows by on each step. Pass either `{epoch: }` for a discrete number of milliseconds, or `{duration: }` for a calendar duration (e.g. `{duration: 1 day}`)." + )] step: WindowDuration, + #[graphql( + desc = "Optional anchor for window boundaries — pass `Unaligned` to disable, or one of the unit values (e.g. `Day`, `Hour`, `Minute`) to align edges to that calendar unit. Defaults to the smallest unit present in `step`." + )] alignment_unit: Option, ) -> Result { let step = step.try_into_interval()?; @@ -120,12 +154,21 @@ impl GqlPathFromNode { } /// Create a view of the PathFromNode including all events between a specified start (inclusive) and end (exclusive). 
- async fn window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn window( + &self, + #[graphql(desc = "Inclusive lower bound.")] start: GqlTimeInput, + #[graphql(desc = "Exclusive upper bound.")] end: GqlTimeInput, + ) -> Self { self.update(self.nn.window(start.into_time(), end.into_time())) } /// Create a view of the PathFromNode including all events at time. - async fn at(&self, time: GqlTimeInput) -> Self { + + async fn at( + &self, + #[graphql(desc = "Instant to pin the view to.")] time: GqlTimeInput, + ) -> Self { self.update(self.nn.at(time.into_time())) } @@ -136,7 +179,11 @@ impl GqlPathFromNode { } /// Create a view of the PathFromNode including all events that are valid at the specified time. - async fn snapshot_at(&self, time: GqlTimeInput) -> Self { + + async fn snapshot_at( + &self, + #[graphql(desc = "Instant at which entities must be valid.")] time: GqlTimeInput, + ) -> Self { self.update(self.nn.snapshot_at(time.into_time())) } @@ -147,32 +194,55 @@ impl GqlPathFromNode { } /// Create a view of the PathFromNode including all events before the specified end (exclusive). - async fn before(&self, time: GqlTimeInput) -> Self { + + async fn before(&self, #[graphql(desc = "Exclusive upper bound.")] time: GqlTimeInput) -> Self { self.update(self.nn.before(time.into_time())) } /// Create a view of the PathFromNode including all events after the specified start (exclusive). - async fn after(&self, time: GqlTimeInput) -> Self { + + async fn after(&self, #[graphql(desc = "Exclusive lower bound.")] time: GqlTimeInput) -> Self { self.update(self.nn.after(time.into_time())) } /// Shrink both the start and end of the window. 
- async fn shrink_window(&self, start: GqlTimeInput, end: GqlTimeInput) -> Self { + + async fn shrink_window( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_window(start.into_time(), end.into_time())) } /// Set the start of the window to the larger of the specified start and self.start(). - async fn shrink_start(&self, start: GqlTimeInput) -> Self { + + async fn shrink_start( + &self, + #[graphql(desc = "Proposed new start (TimeInput); ignored if it would widen the window.")] + start: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_start(start.into_time())) } /// Set the end of the window to the smaller of the specified end and self.end(). - async fn shrink_end(&self, end: GqlTimeInput) -> Self { + + async fn shrink_end( + &self, + #[graphql(desc = "Proposed new end (TimeInput); ignored if it would widen the window.")] + end: GqlTimeInput, + ) -> Self { self.update(self.nn.shrink_end(end.into_time())) } - /// Filter nodes by type. - async fn type_filter(&self, node_types: Vec) -> Self { + /// Narrow this path to neighbours whose node type is in the given set. + + async fn type_filter( + &self, + #[graphql(desc = "Node types to keep.")] node_types: Vec, + ) -> Self { let self_clone = self.clone(); blocking_compute(move || self_clone.update(self_clone.nn.type_filter(&node_types))).await } @@ -195,6 +265,7 @@ impl GqlPathFromNode { //// List /////// ///////////////// + /// Number of neighbour nodes reachable from the source in this view. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.nn.len()).await @@ -205,11 +276,16 @@ impl GqlPathFromNode { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. 
+ async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -221,21 +297,29 @@ impl GqlPathFromNode { .await) } + /// Materialise every neighbour node in the path. Rejected by the server when + /// bulk list endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); Ok(blocking_compute(move || self_clone.iter().collect()).await) } - /// Returns the node ids. - async fn ids(&self) -> Vec { + /// Every neighbour node's id (name) as a flat list of strings. Rejected by the + /// server when bulk list endpoints are disabled. + async fn ids(&self, ctx: &Context<'_>) -> async_graphql::Result> { + check_list_allowed(ctx)?; let self_clone = self.clone(); - blocking_compute(move || self_clone.nn.name().collect()).await + Ok(blocking_compute(move || self_clone.nn.name().collect()).await) } /// Takes a specified selection of views and applies them in given order. + async fn apply_views( &self, + #[graphql( + desc = "Ordered list of view operations; each entry is a one-of variant (`window`, `layer`, `filter`, ...) applied to the running result." + )] views: Vec, ) -> Result { let mut return_view: GqlPathFromNode = self.clone(); @@ -281,8 +365,26 @@ impl GqlPathFromNode { Ok(return_view) } - /// Returns a filtered view that applies to list down the chain - async fn filter(&self, expr: GqlNodeFilter) -> Result { + /// Narrow the neighbour set to nodes matching `expr`. 
The filter sticks to + /// the returned path — every subsequent traversal (further hops, edges, + /// properties) continues to see the filtered scope. + /// + /// Useful when you want one scoping rule to apply across the whole query. + /// E.g. restricting the whole traversal to a specific week: + /// + /// ```text + /// node(name: "A") { neighbours { filter(expr: {window: {...week...}}) { + /// list { neighbours { list { name } } } # further hops still windowed + /// } } } + /// ``` + /// + /// Contrast with `select`, which applies here and is not carried through. + + async fn filter( + &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] + expr: GqlNodeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeNodeFilter = expr.try_into()?; @@ -292,8 +394,27 @@ impl GqlPathFromNode { .await } - /// Returns filtered list of neighbour nodes - async fn select(&self, expr: GqlNodeFilter) -> Result { + /// Narrow the neighbour set to nodes matching `expr`, but only at this hop + /// — further traversals out of these nodes see the unfiltered graph again. + /// + /// Useful when each hop needs a different scope. E.g. neighbours active on + /// Monday, then *their* neighbours active on Tuesday: + /// + /// ```text + /// node(name: "A") { neighbours { select(expr: {window: {...monday...}}) { + /// list { neighbours { select(expr: {window: {...tuesday...}}) { + /// list { name } + /// } } } + /// } } } + /// ``` + /// + /// Contrast with `filter`, which persists the scope through subsequent ops. 
+ + async fn select( + &self, + #[graphql(desc = "Composite node filter (by name, property, type, etc.).")] + expr: GqlNodeFilter, + ) -> Result { let self_clone = self.clone(); blocking_compute(move || { let filter: CompositeNodeFilter = expr.try_into()?; diff --git a/raphtory-graphql/src/model/graph/property.rs b/raphtory-graphql/src/model/graph/property.rs index f658431658..1153cc2876 100644 --- a/raphtory-graphql/src/model/graph/property.rs +++ b/raphtory-graphql/src/model/graph/property.rs @@ -238,6 +238,9 @@ fn prop_to_gql(prop: &Prop) -> GqlValue { } } +/// A single `(key, value)` property reading at a point in the graph view. +/// The value is exposed both as a typed scalar (`value`) and as a +/// human-readable string (`asString`). #[derive(Clone, ResolvedObject)] #[graphql(name = "Property")] pub(crate) struct GqlProperty { @@ -259,19 +262,28 @@ impl From<(String, Prop)> for GqlProperty { #[ResolvedObjectFields] impl GqlProperty { + /// The property key (name). async fn key(&self) -> String { self.key.clone() } + /// The property value rendered as a human-readable string (e.g. `"10"`, `"hello"`, + /// `"2024-01-01T00:00:00Z"`). For programmatic access use `value`, which returns + /// a typed scalar. async fn as_string(&self) -> String { self.prop.to_string() } + /// The property value as a typed `PropertyOutput` scalar — numbers come back as + /// numbers, booleans as booleans, strings as strings, etc. async fn value(&self) -> GqlPropertyOutputVal { GqlPropertyOutputVal(self.prop.clone()) } } +/// A `(time, value)` pair — the output type of temporal-property accessors +/// that need to report *when* a value was observed (e.g. `min`, `max`, +/// `median`, `orderedDedupe`). #[derive(ResolvedObject, Clone)] #[graphql(name = "PropertyTuple")] pub(crate) struct GqlPropertyTuple { @@ -293,20 +305,29 @@ impl From<(EventTime, Prop)> for GqlPropertyTuple { #[ResolvedObjectFields] impl GqlPropertyTuple { + /// The timestamp at which this value was recorded. 
async fn time(&self) -> GqlEventTime { self.time.into() } + /// The value rendered as a human-readable string. For programmatic access use + /// `value`, which returns a typed scalar. async fn as_string(&self) -> String { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.to_string()).await } + /// The value as a typed `PropertyOutput` scalar — numbers come back as numbers, + /// booleans as booleans, etc. async fn value(&self) -> GqlPropertyOutputVal { GqlPropertyOutputVal(self.prop.clone()) } } +/// The full timeline of a single property key on one entity. Exposes every +/// update (via `values` / `history` / `orderedDedupe`), point lookups (`at`, +/// `latest`), and aggregates over the timeline (`sum`, `mean`, `min`, `max`, +/// `median`, `count`). #[derive(ResolvedObject, Clone)] #[graphql(name = "TemporalProperty")] pub(crate) struct GqlTemporalProperty { @@ -328,32 +349,49 @@ impl From<(String, TemporalPropertyView)> for GqlTemporalProperty { #[ResolvedObjectFields] impl GqlTemporalProperty { - /// Key of a property. + /// The property key (name). async fn key(&self) -> String { self.key.clone() } + /// Event history for this property — one entry per temporal update, in + /// insertion order. Use this to navigate the full timeline: access the + /// raw `timestamps` / `datetimes` / `eventId` lists, analyse gaps between + /// updates via `intervals` (mean/median/min/max), ask `isEmpty`, or + /// paginate the events. async fn history(&self) -> GqlHistory { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.history().into()).await } - /// Return the values of the properties. + /// All values this property has ever taken, in temporal order (one per update). + /// Typed as `PropertyOutput` so numeric values stay numeric. 
async fn values(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.values().map(GqlPropertyOutputVal).collect()).await } - async fn at(&self, t: GqlTimeInput) -> Option { + /// The value at or before time `t` (latest update on or before `t`). Returns null + /// if no update exists on or before `t`. + + async fn at( + &self, + #[graphql( + desc = "A TimeInput (epoch millis integer, RFC3339 string, or `{timestamp, eventId}` object)." + )] + t: GqlTimeInput, + ) -> Option { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.at(t.into_time()).map(GqlPropertyOutputVal)).await } + /// The most recent value, or null if the property has never been set in this view. async fn latest(&self) -> Option { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.latest().map(GqlPropertyOutputVal)).await } + /// The set of distinct values this property has ever taken (order not guaranteed). async fn unique(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || { @@ -367,7 +405,15 @@ impl GqlTemporalProperty { .await } - async fn ordered_dedupe(&self, latest_time: bool) -> Vec { + /// Collapses runs of consecutive-equal updates into a single `(time, value)` pair. + + async fn ordered_dedupe( + &self, + #[graphql( + desc = "If true, each run is represented by its *last* timestamp; if false, by its *first*. Useful for compressing chatter in a timeline." + )] + latest_time: bool, + ) -> Vec { let self_clone = self.clone(); blocking_compute(move || { self_clone @@ -393,7 +439,7 @@ impl GqlTemporalProperty { blocking_compute(move || self_clone.prop.mean().map(GqlPropertyOutputVal)).await } - /// Alias for `mean`. + /// Alias for `mean` — same F64 average, same null cases. 
async fn average(&self) -> Option { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.average().map(GqlPropertyOutputVal)).await @@ -420,13 +466,16 @@ impl GqlTemporalProperty { blocking_compute(move || self_clone.prop.median().map(GqlPropertyTuple::from)).await } - /// Number of updates. + /// Number of updates recorded for this property in the current view. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.prop.count()).await } } +/// All temporal properties of an entity (metadata is exposed separately). +/// Look up individual properties via `get` / `contains`, enumerate via +/// `keys` / `values`, or drop into `temporal` for time-aware accessors. #[derive(ResolvedObject, Clone)] #[graphql(name = "Properties")] pub(crate) struct GqlProperties { @@ -448,6 +497,9 @@ impl> From

for GqlProperties { } } +/// The temporal-only view of an entity's properties. Each entry is a +/// `TemporalProperty` carrying the full timeline for that key — use this when +/// you need per-update iteration, time-indexed lookups, or aggregates. #[derive(ResolvedObject, Clone)] #[graphql(name = "TemporalProperties")] pub(crate) struct GqlTemporalProperties { @@ -466,6 +518,10 @@ impl From for GqlTemporalProperties { } } +/// Constant key/value metadata attached to an entity (node, edge, or graph). +/// Metadata has no timeline — each key maps to exactly one value for the +/// lifetime of the entity. Separate from `Properties`, which carries +/// time-varying data. #[derive(ResolvedObject, Clone)] #[graphql(name = "Metadata")] pub(crate) struct GqlMetadata { @@ -486,19 +542,29 @@ impl> From

for GqlMetadata { #[ResolvedObjectFields] impl GqlProperties { - /// Get property value matching the specified key. - async fn get(&self, key: String) -> Option { + /// Look up a single property by key. Returns null if no property with that key + /// exists in the current view. + + async fn get( + &self, + #[graphql(desc = "The property name.")] key: String, + ) -> Option { self.props .get(key.as_str()) .map(|p| (key.to_string(), p).into()) } - /// Check if the key is in the properties. - async fn contains(&self, key: String) -> bool { + /// Returns true if a property with the given key exists in this view. + + async fn contains( + &self, + #[graphql(desc = "The property name to look up.")] key: String, + ) -> bool { self.props.get(&key).is_some() } - /// Return all property keys. + /// All property keys present in the current view. Does not include metadata + /// — metadata is exposed separately via the entity's `metadata` field. async fn keys(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || { @@ -511,8 +577,15 @@ impl GqlProperties { .await } - /// Return all property values. - async fn values(&self, keys: Option>) -> Vec { + /// Snapshot of property values, one `{key, value}` entry per property. + + async fn values( + &self, + #[graphql( + desc = "Optional whitelist. If provided, only properties with these keys are returned; if omitted or null, every property in the view is returned." + )] + keys: Option>, + ) -> Vec { let self_clone = self.clone(); blocking_compute(move || match keys { Some(keys) => self_clone @@ -536,6 +609,8 @@ impl GqlProperties { .await } + /// The temporal-only view of these properties — excludes metadata (which has no + /// history) and lets you drill into per-key timelines and aggregates. async fn temporal(&self) -> GqlTemporalProperties { self.props.temporal().into() } @@ -543,26 +618,42 @@ impl GqlProperties { #[ResolvedObjectFields] impl GqlMetadata { - /// Get metadata value matching the specified key. 
- async fn get(&self, key: String) -> Option { + /// Look up a single metadata value by key. Returns null if no metadata with that + /// key exists. + + async fn get( + &self, + #[graphql(desc = "The metadata name.")] key: String, + ) -> Option { self.props .get(key.as_str()) .map(|p| (key.to_string(), p).into()) } - /// /// Check if the key is in the metadata. - async fn contains(&self, key: String) -> bool { + /// Returns true if a metadata entry with the given key exists. + + async fn contains( + &self, + #[graphql(desc = "The metadata name to look up.")] key: String, + ) -> bool { self.props.contains(key.as_str()) } - /// Return all metadata keys. + /// All metadata keys present on this entity. async fn keys(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || self_clone.props.keys().map(|k| k.clone().into()).collect()).await } - /// /// Return all metadata values. - pub(crate) async fn values(&self, keys: Option>) -> Vec { + /// All metadata values as `{key, value}` entries. + + pub(crate) async fn values( + &self, + #[graphql( + desc = "Optional whitelist. If provided, only metadata with these keys is returned; if omitted, every metadata entry is returned." + )] + keys: Option>, + ) -> Vec { let self_clone = self.clone(); blocking_compute(move || match keys { Some(keys) => self_clone @@ -589,17 +680,26 @@ impl GqlMetadata { #[ResolvedObjectFields] impl GqlTemporalProperties { - /// Get property value matching the specified key. - async fn get(&self, key: String) -> Option { + /// Look up a single temporal property by key. Returns null if there's no temporal + /// property with that key. + + async fn get( + &self, + #[graphql(desc = "The property name.")] key: String, + ) -> Option { self.props.get(key.as_str()).map(move |p| (key, p).into()) } - /// Check if the key is in the properties. - async fn contains(&self, key: String) -> bool { + /// Returns true if a temporal property with the given key exists. 
+ + async fn contains( + &self, + #[graphql(desc = "The property name to look up.")] key: String, + ) -> bool { self.props.get(&key).is_some() } - /// Return all property keys. + /// All temporal-property keys present in this view. async fn keys(&self) -> Vec { let self_clone = self.clone(); blocking_compute(move || { @@ -612,8 +712,16 @@ impl GqlTemporalProperties { .await } - /// Return all property values. - async fn values(&self, keys: Option>) -> Vec { + /// All temporal properties, each as a `TemporalProperty` with its full timeline + /// available. Use `history`, `values`, `latest`, `at`, etc. on each entry. + + async fn values( + &self, + #[graphql( + desc = "Optional whitelist. If provided, only temporal properties with these keys are returned; if omitted, every temporal property in the view is returned." + )] + keys: Option>, + ) -> Vec { let self_clone = self.clone(); blocking_compute(move || match keys { Some(keys) => self_clone diff --git a/raphtory-graphql/src/model/graph/timeindex.rs b/raphtory-graphql/src/model/graph/timeindex.rs index 840ef37688..61227eacb2 100644 --- a/raphtory-graphql/src/model/graph/timeindex.rs +++ b/raphtory-graphql/src/model/graph/timeindex.rs @@ -124,7 +124,14 @@ impl GqlEventTime { /// Defaults to RFC 3339 if not provided (e.g., "2023-12-25T10:30:45.123Z"). /// Refer to chrono::format::strftime for formatting specifiers and escape sequences. /// Raises an error if a time conversion fails. - async fn datetime(&self, format_string: Option) -> Result, Error> { + + async fn datetime( + &self, + #[graphql( + desc = "Optional format string for the rendered datetime. Uses `%`-style specifiers — for example `%Y-%m-%d` for `2024-01-15`, `%Y-%m-%d %H:%M:%S` for `2024-01-15 10:30:00`, or `%H:%M` for `10:30`. Defaults to RFC 3339 (e.g. `2024-01-15T10:30:45.123+00:00`) when omitted." 
+ )] + format_string: Option, + ) -> Result, Error> { let fmt_string = format_string.as_deref().unwrap_or("%+"); // %+ is RFC 3339 if dt_format_str_is_valid(fmt_string) { self.inner diff --git a/raphtory-graphql/src/model/graph/vector_selection.rs b/raphtory-graphql/src/model/graph/vector_selection.rs index 9560d43cff..58e136a75b 100644 --- a/raphtory-graphql/src/model/graph/vector_selection.rs +++ b/raphtory-graphql/src/model/graph/vector_selection.rs @@ -2,6 +2,7 @@ use super::{ document::GqlDocument, edge::GqlEdge, node::GqlNode, + node_id::GqlNodeId, vectorised_graph::{IntoWindowTuple, VectorisedGraphWindow}, }; use crate::rayon::blocking_compute; @@ -14,12 +15,16 @@ use raphtory::{ #[derive(InputObject)] pub(super) struct InputEdge { - /// Source node. - src: String, - /// Destination node. - dst: String, + /// Source node id (string or non-negative integer). + src: GqlNodeId, + /// Destination node id (string or non-negative integer). + dst: GqlNodeId, } +/// A working set of documents / nodes / edges built up via similarity +/// searches on a `VectorisedGraph`. Selections are mutable: you can grow +/// them with more hops (`expand*`), dereference the contents (`nodes`, +/// `edges`, `getDocuments`), or start fresh with `emptySelection`. #[derive(ResolvedObject)] #[graphql(name = "VectorSelection")] pub(crate) struct GqlVectorSelection(VectorSelection); @@ -56,29 +61,45 @@ impl GqlVectorSelection { .collect()) } - /// Adds all the documents associated with the specified nodes to the current selection. - /// - /// Documents added by this call are assumed to have a score of 0. - async fn add_nodes(&self, nodes: Vec) -> Self { + /// Add every document associated with the named nodes to the selection. + /// Documents added this way receive a score of 0 (no similarity ranking). 
+ + async fn add_nodes( + &self, + #[graphql(desc = "Node ids whose documents to include.")] nodes: Vec, + ) -> Self { let mut selection = self.cloned(); selection.add_nodes(nodes); selection.into() } - /// Adds all the documents associated with the specified edges to the current selection. - /// - /// Documents added by this call are assumed to have a score of 0. - async fn add_edges(&self, edges: Vec) -> Self { + /// Add every document associated with the named edges to the selection. + /// Documents added this way receive a score of 0 (no similarity ranking). + + async fn add_edges( + &self, + #[graphql(desc = "List of `{src, dst}` pairs identifying the edges.")] edges: Vec< + InputEdge, + >, + ) -> Self { let mut selection = self.cloned(); let edges = edges.into_iter().map(|edge| (edge.src, edge.dst)).collect(); selection.add_edges(edges); selection.into() } - /// Add all the documents a specified number of hops away to the selection. - /// - /// Two documents A and B are considered to be 1 hop away of each other if they are on the same entity or if they are on the same node and edge pair. - async fn expand(&self, hops: usize, window: Option) -> Self { + /// Grow the selection by including documents that are within `hops` of any + /// document already in the selection. Two documents are 1 hop apart if + /// they're on the same entity or on a connected node/edge pair. + + async fn expand( + &self, + #[graphql(desc = "Number of expansion rounds (1 = direct neighbours).")] hops: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict expansion to entities active in that interval." + )] + window: Option, + ) -> Self { let window = window.into_window_tuple(); let mut selection = self.cloned(); blocking_compute(move || { @@ -88,11 +109,18 @@ impl GqlVectorSelection { .await } - /// Adds documents, from the set of one hop neighbours to the current selection, to the selection based on their similarity score with the specified query. 
This function loops so that the set of one hop neighbours expands on each loop and number of documents added is determined by the specified limit. + /// Iteratively expand the selection by similarity to a natural-language + /// query. Each pass takes the one-hop neighbour set of the current + /// selection and adds the highest-scoring entities (mixed nodes and + /// edges); the loop continues until `limit` entities have been added. + async fn expand_entities_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Total number of entities to add across all passes.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to entities active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.embed_text(query).await?; @@ -104,11 +132,16 @@ impl GqlVectorSelection { Ok(selection.into()) } - /// Add the adjacent nodes with higher score for query to the selection up to a specified limit. This function loops like expand_entities_by_similarity but is restricted to nodes. + /// Like `expandEntitiesBySimilarity` but restricted to nodes — iteratively + /// add the highest-scoring adjacent nodes to the selection. + async fn expand_nodes_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Total number of nodes to add across all passes.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to nodes active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.embed_text(query).await?; @@ -120,11 +153,16 @@ impl GqlVectorSelection { Ok(selection.into()) } - /// Add the adjacent edges with higher score for query to the selection up to a specified limit. This function loops like expand_entities_by_similarity but is restricted to edges. 
+ /// Like `expandEntitiesBySimilarity` but restricted to edges — iteratively + /// add the highest-scoring adjacent edges to the selection. + async fn expand_edges_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Total number of edges to add across all passes.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to edges active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.embed_text(query).await?; diff --git a/raphtory-graphql/src/model/graph/vectorised_graph.rs b/raphtory-graphql/src/model/graph/vectorised_graph.rs index 23e4479588..eb455b13f9 100644 --- a/raphtory-graphql/src/model/graph/vectorised_graph.rs +++ b/raphtory-graphql/src/model/graph/vectorised_graph.rs @@ -1,4 +1,4 @@ -use crate::rayon::blocking_compute; +use crate::{model::graph::timeindex::GqlTimeInput, rayon::blocking_compute}; use super::vector_selection::GqlVectorSelection; use dynamic_graphql::{InputObject, ResolvedObject, ResolvedObjectFields}; @@ -6,13 +6,14 @@ use raphtory::{ db::api::view::MaterializedGraph, errors::GraphResult, vectors::vectorised_graph::VectorisedGraph, }; +use raphtory_api::core::{storage::timeindex::AsTime, utils::time::IntoTime}; #[derive(InputObject)] pub(super) struct VectorisedGraphWindow { - /// Start time. - start: i64, - /// End time. - end: i64, + /// Inclusive lower bound of the search window. + start: GqlTimeInput, + /// Exclusive upper bound of the search window. + end: GqlTimeInput, } pub(super) trait IntoWindowTuple { @@ -21,10 +22,14 @@ pub(super) trait IntoWindowTuple { impl IntoWindowTuple for Option { fn into_window_tuple(self) -> Option<(i64, i64)> { - self.map(|window| (window.start, window.end)) + self.map(|window| (window.start.into_time().t(), window.end.into_time().t())) } } +/// A graph with embedded vector representations for its nodes and edges. 
+/// Exposes similarity search over documents, nodes, and edges, plus +/// selection building (`emptySelection`) and index maintenance +/// (`optimizeIndex`). #[derive(ResolvedObject)] #[graphql(name = "VectorisedGraph")] pub(crate) struct GqlVectorisedGraph(VectorisedGraph); @@ -37,7 +42,9 @@ impl From> for GqlVectorisedGraph { #[ResolvedObjectFields] impl GqlVectorisedGraph { - /// Optmize the vector index + /// Rebuild (or incrementally update) the on-disk vector indexes for nodes + /// and edges so subsequent similarity searches hit the fresh embeddings. + /// Safe to call repeatedly; returns true on success. async fn optimize_index(&self) -> GraphResult { self.0.optimize_index().await?; Ok(true) @@ -48,11 +55,17 @@ impl GqlVectorisedGraph { self.0.empty_selection().into() } - /// Search the top scoring entities according to a specified query returning no more than a specified limit of entities. + /// Find the highest-scoring nodes *and* edges (mixed) by similarity to a + /// natural-language query. The query is embedded server-side and matched + /// against indexed entity vectors. + async fn entities_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Maximum number of results to return.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to entities active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.0.embed_text(query).await?; @@ -63,11 +76,17 @@ impl GqlVectorisedGraph { Ok(query.execute().await?.into()) } - /// Search the top scoring nodes according to a specified query returning no more than a specified limit of nodes. + /// Find the highest-scoring nodes by similarity to a natural-language + /// query. The query is embedded server-side and matched against indexed + /// node vectors. 
+ async fn nodes_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Maximum number of nodes to return.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to nodes active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.0.embed_text(query).await?; @@ -77,11 +96,17 @@ impl GqlVectorisedGraph { Ok(query.execute().await?.into()) } - /// Search the top scoring edges according to a specified query returning no more than a specified limit of edges. + /// Find the highest-scoring edges by similarity to a natural-language + /// query. The query is embedded server-side and matched against indexed + /// edge vectors. + async fn edges_by_similarity( &self, - query: String, - limit: usize, + #[graphql(desc = "Natural-language search string; embedded by the server.")] query: String, + #[graphql(desc = "Maximum number of edges to return.")] limit: usize, + #[graphql( + desc = "Optional `{start, end}` to restrict matches to edges active in that interval." + )] window: Option, ) -> GraphResult { let vector = self.0.embed_text(query).await?; diff --git a/raphtory-graphql/src/model/graph/windowset.rs b/raphtory-graphql/src/model/graph/windowset.rs index da1fd6311e..e118986553 100644 --- a/raphtory-graphql/src/model/graph/windowset.rs +++ b/raphtory-graphql/src/model/graph/windowset.rs @@ -21,6 +21,10 @@ use raphtory::db::{ graph::{edge::EdgeView, edges::Edges, node::NodeView, nodes::Nodes, path::PathFromNode}, }; +/// A lazy sequence of graph snapshots produced by `rolling` or `expanding`. +/// Each entry is a `Graph` at a different window over time. Iterate via +/// `list` / `page` (or count with `count`). Subsequent view ops apply +/// per-window. 
#[derive(ResolvedObject, Clone)] #[graphql(name = "GraphWindowSet")] pub(crate) struct GqlGraphWindowSet { @@ -35,7 +39,8 @@ impl GqlGraphWindowSet { } #[ResolvedObjectFields] impl GqlGraphWindowSet { - /// Returns the number of items. + /// Number of windows in this set. Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -46,11 +51,16 @@ impl GqlGraphWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -68,6 +78,8 @@ impl GqlGraphWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); @@ -82,6 +94,9 @@ impl GqlGraphWindowSet { } } +/// A lazy sequence of per-window views of a single node, produced by +/// `node.rolling` / `node.expanding`. Each entry is the node as it exists in +/// that window. #[derive(ResolvedObject, Clone)] #[graphql(name = "NodeWindowSet")] pub(crate) struct GqlNodeWindowSet { @@ -95,6 +110,8 @@ impl GqlNodeWindowSet { } #[ResolvedObjectFields] impl GqlNodeWindowSet { + /// Number of windows in this set. 
Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -105,11 +122,16 @@ impl GqlNodeWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -127,6 +149,8 @@ impl GqlNodeWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); @@ -134,6 +158,9 @@ impl GqlNodeWindowSet { } } +/// A lazy sequence of per-window node collections, produced by +/// `nodes.rolling` / `nodes.expanding`. Each entry is a `Nodes` collection +/// as it exists in that window. #[derive(ResolvedObject, Clone)] #[graphql(name = "NodesWindowSet")] pub(crate) struct GqlNodesWindowSet { @@ -149,6 +176,8 @@ impl GqlNodesWindowSet { } #[ResolvedObjectFields] impl GqlNodesWindowSet { + /// Number of windows in this set. Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. 
async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -159,11 +188,16 @@ impl GqlNodesWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -181,6 +215,8 @@ impl GqlNodesWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); @@ -191,6 +227,9 @@ impl GqlNodesWindowSet { } } +/// A lazy sequence of per-window neighbour sets, produced by +/// `neighbours.rolling` / `neighbours.expanding` (or the in/out variants). +/// Each entry is a `PathFromNode` scoped to that window. #[derive(ResolvedObject, Clone)] #[graphql(name = "PathFromNodeWindowSet")] pub(crate) struct GqlPathFromNodeWindowSet { @@ -204,6 +243,8 @@ impl GqlPathFromNodeWindowSet { } #[ResolvedObjectFields] impl GqlPathFromNodeWindowSet { + /// Number of windows in this set. Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. 
async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -214,11 +255,16 @@ impl GqlPathFromNodeWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -236,6 +282,8 @@ impl GqlPathFromNodeWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); @@ -250,6 +298,9 @@ impl GqlPathFromNodeWindowSet { } } +/// A lazy sequence of per-window views of a single edge, produced by +/// `edge.rolling` / `edge.expanding`. Each entry is the edge as it exists in +/// that window. #[derive(ResolvedObject, Clone)] #[graphql(name = "EdgeWindowSet")] pub(crate) struct GqlEdgeWindowSet { @@ -263,6 +314,8 @@ impl GqlEdgeWindowSet { } #[ResolvedObjectFields] impl GqlEdgeWindowSet { + /// Number of windows in this set. Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -273,11 +326,16 @@ impl GqlEdgeWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. 
+ async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." + )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -295,6 +353,8 @@ impl GqlEdgeWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); @@ -302,6 +362,9 @@ impl GqlEdgeWindowSet { } } +/// A lazy sequence of per-window edge collections, produced by +/// `edges.rolling` / `edges.expanding`. Each entry is an `Edges` collection +/// as it exists in that window. #[derive(ResolvedObject, Clone)] #[graphql(name = "EdgesWindowSet")] pub(crate) struct GqlEdgesWindowSet { @@ -315,6 +378,8 @@ impl GqlEdgesWindowSet { } #[ResolvedObjectFields] impl GqlEdgesWindowSet { + /// Number of windows in this set. Materialising all windows is expensive for + /// large graphs — prefer `page` over `list` when iterating. async fn count(&self) -> usize { let self_clone = self.clone(); blocking_compute(move || self_clone.ws.clone().count()).await @@ -325,11 +390,16 @@ impl GqlEdgesWindowSet { /// /// For example, if page(5, 2, 1) is called, a page with 5 items, offset by 11 items (2 pages of 5 + 1), /// will be returned. + async fn page( &self, ctx: &Context<'_>, - limit: usize, + #[graphql(desc = "Maximum number of items to return on this page.")] limit: usize, + #[graphql(desc = "Extra items to skip on top of `pageIndex` paging (default 0).")] offset: Option, + #[graphql( + desc = "Zero-based page number; multiplies `limit` to determine where to start (default 0)." 
+ )] page_index: Option, ) -> async_graphql::Result> { check_page_limit(ctx, limit)?; @@ -347,6 +417,8 @@ impl GqlEdgesWindowSet { .await) } + /// Materialise every window as a list. Rejected by the server when bulk list + /// endpoints are disabled; use `page` for paginated access instead. async fn list(&self, ctx: &Context<'_>) -> async_graphql::Result> { check_list_allowed(ctx)?; let self_clone = self.clone(); diff --git a/raphtory-graphql/src/model/mod.rs b/raphtory-graphql/src/model/mod.rs index 94a8555f0b..de734a771a 100644 --- a/raphtory-graphql/src/model/mod.rs +++ b/raphtory-graphql/src/model/mod.rs @@ -12,6 +12,7 @@ use crate::{ mutable_graph::GqlMutableGraph, namespace::Namespace, namespaced_item::NamespacedItem, + node_id::GqlNodeId, vectorised_graph::GqlVectorisedGraph, }, plugins::{ @@ -379,6 +380,11 @@ fn require_graph_read_src( } } +/// Top-level READ-only query root. Entry points for loading a graph +/// (`graph`, `graphMetadata`), browsing stored graphs (`namespaces`, +/// `namespace`, `root`), downloading a stored graph as a base64 blob +/// (`receiveGraph`), inspecting vectorised variants (`vectorisedGraph`), +/// and a few utility endpoints (`version`, `hello`, `plugins`). #[derive(ResolvedObject)] #[graphql(root)] pub(crate) struct QueryRoot; @@ -401,13 +407,31 @@ fn resolve(template: Option