Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions python/python/raphtory/algorithms/__init__.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,7 @@ def pagerank(
max_diff: Optional[float] = None,
use_l2_norm: bool = True,
damping_factor: float = 0.85,
personalization: Optional[dict[NodeInput, float]] = None,
) -> NodeStateF64:
"""
Pagerank -- pagerank centrality value of the nodes in a graph
Expand All @@ -305,6 +306,9 @@ def pagerank(
is less than the max diff value given.
use_l2_norm (bool): Flag for choosing the norm to use for convergence checks, True for l2 norm, False for l1 norm. Defaults to True.
damping_factor (float): The damping factor for the PageRank calculation. Defaults to 0.85.
personalization (Optional[dict[NodeInput, float]]): A dictionary mapping nodes to personalization values.
When provided, the random walk teleports to nodes proportionally to these values
instead of uniformly. Values are normalized to sum to 1. Defaults to None (uniform).

Returns:
NodeStateF64: Mapping of nodes to their pagerank value.
Expand Down
15 changes: 15 additions & 0 deletions python/tests/test_base_install/test_graphdb/test_algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -312,6 +312,21 @@ def test_page_rank():
assert actual == expected


def test_personalized_page_rank():
    # Small directed graph: 1→2, 1→4, 2→3, 3→1, 4→1.
    g = Graph()
    for src, dst in [(1, 2), (1, 4), (2, 3), (3, 1), (4, 1)]:
        g.add_edge(0, src, dst, {})

    # All teleport mass concentrated on node 1.
    result = algorithms.pagerank(
        g, iter_count=1000, personalization={"1": 1.0, "2": 0.0, "3": 0.0, "4": 0.0}
    )
    expected = {"1": 0.45223, "2": 0.19220, "3": 0.16337, "4": 0.19220}
    for node, value in expected.items():
        assert abs(result[node] - value) < 1e-5, f"node {node}: {result[node]} != {value}"

    # Mass split between nodes 1 and 3; weights are normalized internally.
    result = algorithms.pagerank(
        g,
        iter_count=1000,
        max_diff=1e-10,
        use_l2_norm=False,
        personalization={"1": 0.5, "3": 0.5},
    )
    expected = {"1": 0.41832, "2": 0.17778, "3": 0.22612, "4": 0.17778}
    for node, value in expected.items():
        assert abs(result[node] - value) < 1e-5, f"node {node}: {result[node]} != {value}"


def test_temporal_reachability():
g = gen_graph()

Expand Down
2 changes: 1 addition & 1 deletion raphtory-benchmark/benches/algobench.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ pub fn graphgen_large_pagerank(c: &mut Criterion) {
&graph,
|b, graph| {
b.iter(|| {
let result = unweighted_page_rank(graph, Some(100), None, None, true, None);
let result = unweighted_page_rank(graph, Some(100), None, None, true, None, None);
black_box(result);
});
},
Expand Down
1 change: 1 addition & 0 deletions raphtory-graphql/src/model/plugins/algorithms.rs
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ fn apply_pagerank<'b>(
tol,
true,
damping_factor,
None,
);
let result = binding
.into_iter()
Expand Down
152 changes: 128 additions & 24 deletions raphtory/src/algorithms/centrality/pagerank.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,56 @@ use crate::{
prelude::GraphViewOps,
};
use num_traits::abs;
use raphtory_api::core::entities::VID;
use std::collections::HashMap;
use std::sync::Arc;

/// Strategy for how PageRank "teleport" mass is injected back into the graph.
///
/// Two statically-dispatched implementations exist: the classic uniform
/// teleport and a personalization-weighted teleport, so the hot loops pay no
/// dynamic-dispatch cost.
trait Teleport: Clone + Send + Sync + 'static {
    /// Teleport mass a given node receives on every iteration.
    fn teleport_value(&self, vid_index: usize, damp: f64) -> f64;
    /// Mass a dangling (out-degree-zero) node adds to the global sink pool.
    fn sink_contribution(&self, prev_score: f64) -> f64;
    /// Share of the pooled sink mass handed back to a given node.
    fn distribute_sink(&self, total_sink: f64, vid_index: usize, damp: f64) -> f64;
}

/// Classic PageRank teleport: every node is treated identically, so the
/// per-node constants are precomputed once.
#[derive(Clone)]
struct Uniform {
    // (1 - damp) / n, precomputed by the caller.
    teleport_prob: f64,
    // damp / n, precomputed by the caller.
    factor: f64,
}

impl Teleport for Uniform {
    #[inline]
    fn teleport_value(&self, _vid_index: usize, _damp: f64) -> f64 {
        // Identical constant for every node.
        self.teleport_prob
    }

    #[inline]
    fn sink_contribution(&self, prev_score: f64) -> f64 {
        // Fold the damp/n scaling in here so the pooled value can be added
        // back verbatim in distribute_sink.
        prev_score * self.factor
    }

    #[inline]
    fn distribute_sink(&self, total_sink: f64, _vid_index: usize, _damp: f64) -> f64 {
        total_sink
    }
}

/// Personalized teleport: mass flows back proportionally to per-node weights
/// (already normalized to sum to 1 by the caller).
#[derive(Clone)]
struct Personalized {
    // Dense weight vector indexed by VID index; shared cheaply across tasks.
    weights: Arc<Vec<f64>>,
}

impl Teleport for Personalized {
    #[inline]
    fn teleport_value(&self, vid_index: usize, damp: f64) -> f64 {
        self.weights[vid_index] * (1.0 - damp)
    }

    #[inline]
    fn sink_contribution(&self, prev_score: f64) -> f64 {
        // Pool the raw score; damping and weighting happen on redistribution.
        prev_score
    }

    #[inline]
    fn distribute_sink(&self, total_sink: f64, vid_index: usize, damp: f64) -> f64 {
        total_sink * damp * self.weights[vid_index]
    }
}

#[derive(Clone, Debug, Default)]
struct PageRankState {
Expand Down Expand Up @@ -45,6 +95,9 @@ impl PageRankState {
/// - `tol`: The tolerance value for convergence
/// - `use_l2_norm`: Whether to use L2 norm for convergence
/// - `damping_factor`: Probability of likelihood the spread will continue
/// - `personalization`: Optional map from node VID to personalization weight.
/// When provided, the random walk teleports proportionally to these weights
/// instead of uniformly. Values are normalized to sum to 1.
///
/// # Returns
///
Expand All @@ -57,16 +110,58 @@ pub fn unweighted_page_rank<G: StaticGraphViewOps>(
tol: Option<f64>,
use_l2_norm: bool,
damping_factor: Option<f64>,
personalization: Option<HashMap<VID, f64>>,
) -> NodeState<'static, f64, G> {
let n = g.count_nodes();
let damp = damping_factor.unwrap_or(0.85);

match personalization {
Some(p) => {
let total: f64 = p.values().sum();
let mut weights = vec![0.0f64; n];
for (&vid, &value) in &p {
weights[vid.index()] = value / total;
}
run_pagerank(
g,
iter_count,
threads,
tol,
use_l2_norm,
damp,
Personalized { weights: Arc::new(weights) },
)
}
None => run_pagerank(
g,
iter_count,
threads,
tol,
use_l2_norm,
damp,
Uniform {
teleport_prob: (1f64 - damp) / n as f64,
factor: damp / n as f64,
},
),
}
}

fn run_pagerank<G: StaticGraphViewOps, T: Teleport>(
g: &G,
iter_count: Option<usize>,
threads: Option<usize>,
tol: Option<f64>,
use_l2_norm: bool,
damp: f64,
teleport: T,
) -> NodeState<'static, f64, G> {
let n = g.count_nodes();

let mut ctx: Context<G, ComputeStateVec> = g.into();

let tol: f64 = tol.unwrap_or(0.000001f64);
let damp = damping_factor.unwrap_or(0.85);
let iter_count = iter_count.unwrap_or(20);
let teleport_prob = (1f64 - damp) / n as f64;
let factor = damp / n as f64;

let max_diff = accumulators::sum::<f64>(2);

Expand All @@ -83,35 +178,42 @@ pub fn unweighted_page_rank<G: StaticGraphViewOps>(
Step::Continue
});

let step2: ATask<G, ComputeStateVec, PageRankState, _> = ATask::new(move |s| {
// reset score
{
let state: &mut PageRankState = s.get_mut();
state.reset();
}
let step2: ATask<G, ComputeStateVec, PageRankState, _> = ATask::new({
let teleport = teleport.clone();
move |s| {
{
let state: &mut PageRankState = s.get_mut();
state.reset();
}

for t in s.in_neighbours() {
let prev = t.prev();
for t in s.in_neighbours() {
let prev = t.prev();

s.get_mut().score += prev.score / prev.out_degree as f64;
}
s.get_mut().score += prev.score / prev.out_degree as f64;
}

s.get_mut().score *= damp;
s.get_mut().score *= damp;

s.get_mut().score += teleport_prob;
Step::Continue
s.get_mut().score += teleport.teleport_value(s.node.index(), damp);
Step::Continue
}
});

let step3 = ATask::new(move |s| {
let state: &mut PageRankState = s.get_mut();
let step3 = ATask::new({
let teleport = teleport.clone();
move |s| {
let state: &mut PageRankState = s.get_mut();

if state.out_degree == 0 {
let curr = s.prev().score;
if state.out_degree == 0 {
let curr = s.prev().score;

let ts_contrib = factor * curr;
s.global_update(&total_sink_contribution, ts_contrib);
s.global_update(
&total_sink_contribution,
teleport.sink_contribution(curr),
);
}
Step::Continue
}
Step::Continue
});

let step4 = ATask::new(move |s| {
Expand All @@ -120,8 +222,10 @@ pub fn unweighted_page_rank<G: StaticGraphViewOps>(
.read_global_state(&total_sink_contribution)
.unwrap_or_default();
// update local score with total sink contribution
let vid_index = s.node.index();
let state: &mut PageRankState = s.get_mut();
state.score += total_sink_contribution;
state.score +=
teleport.distribute_sink(total_sink_contribution, vid_index, damp);

// update global max diff

Expand Down
17 changes: 15 additions & 2 deletions raphtory/src/python/packages/algorithms.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,11 +70,12 @@ use crate::{
utils::PyNodeRef,
},
};
use crate::prelude::GraphViewOps;
use pyo3::{prelude::*, types::PyList};
use rand::{prelude::StdRng, SeedableRng};
use raphtory_api::core::{entities::LayerIds, storage::timeindex::EventTime, Direction};
use raphtory_storage::core_ops::CoreGraphOps;
use std::collections::HashSet;
use std::collections::{HashMap, HashSet};

/// Helper function to parse single-vertex or multi-vertex parameters to a Vec of vertices
fn process_node_param(param: &Bound<PyAny>) -> PyResult<Vec<PyNodeRef>> {
Expand Down Expand Up @@ -268,25 +269,37 @@ pub fn out_component(
/// is less than the max diff value given.
/// use_l2_norm (bool): Flag for choosing the norm to use for convergence checks, True for l2 norm, False for l1 norm. Defaults to True.
/// damping_factor (float): The damping factor for the PageRank calculation. Defaults to 0.85.
/// personalization (Optional[dict[Node, float]]): A dictionary mapping nodes to personalization values.
/// When provided, the random walk teleports to nodes proportionally to these values
/// instead of uniformly. Values are normalized to sum to 1. Defaults to None (uniform).
///
/// Returns:
/// NodeStateF64: Mapping of nodes to their pagerank value.
#[pyfunction]
#[pyo3(signature = (graph, iter_count=20, max_diff=None, use_l2_norm=true, damping_factor=0.85))]
#[pyo3(signature = (graph, iter_count=20, max_diff=None, use_l2_norm=true, damping_factor=0.85, personalization=None))]
pub fn pagerank(
    graph: &PyGraphView,
    iter_count: usize,
    max_diff: Option<f64>,
    use_l2_norm: bool,
    damping_factor: Option<f64>,
    personalization: Option<HashMap<PyNodeRef, f64>>,
) -> NodeState<'static, f64, DynamicGraph> {
    // Resolve Python-side node references to internal VIDs. Entries whose
    // node cannot be found in the graph are silently dropped, matching the
    // lookup-or-skip behaviour of the resolution step.
    let personalization = personalization.map(|weights| {
        let mut resolved = HashMap::new();
        for (node_ref, value) in weights {
            if let Some(node) = graph.graph.node(node_ref) {
                resolved.insert(node.node, value);
            }
        }
        resolved
    });
    unweighted_page_rank(
        &graph.graph,
        Some(iter_count),
        None,
        max_diff,
        use_l2_norm,
        damping_factor,
        personalization,
    )
}

Expand Down
Loading