diff --git a/csharp/Platform.Data.Doublets.Benchmarks/ExperimentalAlgorithmsBenchmarks.cs b/csharp/Platform.Data.Doublets.Benchmarks/ExperimentalAlgorithmsBenchmarks.cs
new file mode 100644
index 000000000..b1edd2688
--- /dev/null
+++ b/csharp/Platform.Data.Doublets.Benchmarks/ExperimentalAlgorithmsBenchmarks.cs
@@ -0,0 +1,209 @@
+using System.Collections.Generic;
+using System.Linq;
+using BenchmarkDotNet.Attributes;
+using Platform.Collections.Arrays;
+using Platform.Collections.Lists;
+using Platform.Converters;
+using Platform.Data.Doublets.Memory.United.Generic;
+using Platform.Memory;
+using TLinkAddress = System.UInt64;
+
+#pragma warning disable CA1822 // Mark members as static
+
+namespace Platform.Data.Doublets.Benchmarks
+{
+    /// <summary>
+    /// Benchmarks comparing experimental versions of similar algorithms.
+    /// This demonstrates different approaches to link data collection and processing,
+    /// showing performance differences between various implementation strategies.
+    /// </summary>
+    [SimpleJob]
+    [MemoryDiagnoser]
+    public class ExperimentalAlgorithmsBenchmarks
+    {
+        private static ILinks<TLinkAddress> _links;
+        private static TLinkAddress _any;
+        private static HeapResizableDirectMemory _dataMemory;
+
+        [Params(100, 1000, 5000)]
+        public static int N;
+
+ [GlobalSetup]
+ public static void Setup()
+ {
+            _dataMemory = new HeapResizableDirectMemory();
+            _links = new UnitedMemoryLinks<TLinkAddress>(_dataMemory).DecorateWithAutomaticUniquenessAndUsagesResolution();
+            _any = _links.Constants.Any;
+            var firstLink = _links.CreatePoint();
+
+ // Create test data
+ for (int i = 0; i < N; i++)
+ {
+ var link = _links.Create();
+ _links.Update(link, firstLink, link);
+ }
+ for (int i = 0; i < N; i++)
+ {
+ var link = _links.Create();
+ _links.Update(link, link, firstLink);
+ }
+ }
+
+ [GlobalCleanup]
+ public static void Cleanup()
+ {
+ _dataMemory.Dispose();
+ }
+
+        /// <summary>
+        /// Version 1: Using pre-allocated array with exact size calculation.
+        /// This is the most memory-efficient approach but requires two passes.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V1_PreallocatedArray()
+        {
+            var addressToInt64Converter = CheckedConverter<TLinkAddress, long>.Default;
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsSourceCount = addressToInt64Converter.Convert(_links.Count(usagesAsSourceQuery));
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+            var usagesAsTargetCount = addressToInt64Converter.Convert(_links.Count(usagesAsTargetQuery));
+            var totalUsages = usagesAsSourceCount + usagesAsTargetCount;
+            var usages = new IList<TLinkAddress>?[totalUsages];
+            var usagesFiller = new ArrayFiller<IList<TLinkAddress>?, TLinkAddress>(usages, _links.Constants.Continue);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsSourceQuery);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsTargetQuery);
+            return usages;
+        }
+
+        /// <summary>
+        /// Version 2: Using dynamic List without pre-calculation.
+        /// This is simpler but less memory-efficient due to dynamic resizing.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V2_DynamicList()
+        {
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+            var usages = new List<IList<TLinkAddress>?>();
+            var usagesFiller = new ListFiller<IList<TLinkAddress>?, TLinkAddress>(usages, _links.Constants.Continue);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsSourceQuery);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsTargetQuery);
+            return usages;
+        }
+
+        /// <summary>
+        /// Version 3: Using List with pre-calculated capacity.
+        /// This combines the benefits of both approaches - single allocation with dynamic structure.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V3_PreallocatedList()
+        {
+            var addressToInt64Converter = CheckedConverter<TLinkAddress, long>.Default;
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsSourceCount = addressToInt64Converter.Convert(_links.Count(usagesAsSourceQuery));
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+            var usagesAsTargetCount = addressToInt64Converter.Convert(_links.Count(usagesAsTargetQuery));
+            var totalUsages = usagesAsSourceCount + usagesAsTargetCount;
+            var usages = new List<IList<TLinkAddress>?>((int)totalUsages);
+            var usagesFiller = new ListFiller<IList<TLinkAddress>?, TLinkAddress>(usages, _links.Constants.Continue);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsSourceQuery);
+            _links.Each(usagesFiller.AddAndReturnConstant, usagesAsTargetQuery);
+            return usages;
+        }
+
+        /// <summary>
+        /// Version 4: Using LINQ-based approach for collection.
+        /// This is the most readable but potentially least performant approach.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V4_LinqBased()
+        {
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+
+            var sourceUsages = new List<IList<TLinkAddress>?>();
+            var targetUsages = new List<IList<TLinkAddress>?>();
+
+            _links.Each(link => { sourceUsages.Add(link); return _links.Constants.Continue; }, usagesAsSourceQuery);
+            _links.Each(link => { targetUsages.Add(link); return _links.Constants.Continue; }, usagesAsTargetQuery);
+
+            return sourceUsages.Concat(targetUsages).ToList();
+        }
+
+        /// <summary>
+        /// Version 5: Experimental batch processing approach.
+        /// Processes links in batches to potentially improve cache locality.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V5_BatchProcessing()
+        {
+            const int batchSize = 100;
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+            var usages = new List<IList<TLinkAddress>?>();
+
+            // Process in batches
+            var batch = new List<IList<TLinkAddress>?>(batchSize);
+
+ _links.Each(link =>
+ {
+ batch.Add(link);
+ if (batch.Count >= batchSize)
+ {
+ usages.AddRange(batch);
+ batch.Clear();
+ }
+ return _links.Constants.Continue;
+ }, usagesAsSourceQuery);
+
+ if (batch.Count > 0)
+ {
+ usages.AddRange(batch);
+ batch.Clear();
+ }
+
+ _links.Each(link =>
+ {
+ batch.Add(link);
+ if (batch.Count >= batchSize)
+ {
+ usages.AddRange(batch);
+ batch.Clear();
+ }
+ return _links.Constants.Continue;
+ }, usagesAsTargetQuery);
+
+ if (batch.Count > 0)
+ {
+ usages.AddRange(batch);
+ }
+
+ return usages;
+ }
+
+        /// <summary>
+        /// Version 6: Memory-optimized approach using ArrayPool.
+        /// Uses pooled arrays to reduce GC pressure for temporary storage.
+        /// </summary>
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> LinkCollection_V6_PooledArrays()
+        {
+            var addressToInt64Converter = CheckedConverter<TLinkAddress, long>.Default;
+            var usagesAsSourceQuery = new Link<TLinkAddress>(_any, 1UL, _any);
+            var usagesAsSourceCount = addressToInt64Converter.Convert(_links.Count(usagesAsSourceQuery));
+            var usagesAsTargetQuery = new Link<TLinkAddress>(_any, _any, 1UL);
+            var usagesAsTargetCount = addressToInt64Converter.Convert(_links.Count(usagesAsTargetQuery));
+            var totalUsages = usagesAsSourceCount + usagesAsTargetCount;
+
+            // Use a simple approach since ArrayPool might not be available
+            var usages = new List<IList<TLinkAddress>?>((int)totalUsages);
+            var tempList = new List<IList<TLinkAddress>?>();
+
+            _links.Each(link => { tempList.Add(link); return _links.Constants.Continue; }, usagesAsSourceQuery);
+            _links.Each(link => { tempList.Add(link); return _links.Constants.Continue; }, usagesAsTargetQuery);
+
+            usages.AddRange(tempList);
+            return usages;
+        }
+ }
+}
\ No newline at end of file
diff --git a/csharp/Platform.Data.Doublets.Benchmarks/MemoryImplementationsBenchmarks.cs b/csharp/Platform.Data.Doublets.Benchmarks/MemoryImplementationsBenchmarks.cs
new file mode 100644
index 000000000..4a58f7d44
--- /dev/null
+++ b/csharp/Platform.Data.Doublets.Benchmarks/MemoryImplementationsBenchmarks.cs
@@ -0,0 +1,191 @@
+using System.Collections.Generic;
+using BenchmarkDotNet.Attributes;
+using Platform.Data.Doublets.Memory;
+using Platform.Data.Doublets.Memory.Split.Generic;
+using Platform.Data.Doublets.Memory.United.Generic;
+using Platform.Memory;
+using TLinkAddress = System.UInt64;
+
+#pragma warning disable CA1822 // Mark members as static
+
+namespace Platform.Data.Doublets.Benchmarks
+{
+    /// <summary>
+    /// Benchmarks comparing Split vs United memory implementation approaches for the same algorithms.
+    /// This benchmark compares performance characteristics between split and united memory architectures.
+    /// </summary>
+    [SimpleJob]
+    [MemoryDiagnoser]
+    public class MemoryImplementationsBenchmarks
+    {
+        private ILinks<TLinkAddress> _splitMemoryLinks;
+        private ILinks<TLinkAddress> _unitedMemoryLinks;
+        private HeapResizableDirectMemory _splitDataMemory;
+        private HeapResizableDirectMemory _splitIndexMemory;
+        private HeapResizableDirectMemory _unitedMemory;
+
+        [Params(100, 1000, 10000)]
+        public int N;
+
+ [GlobalSetup]
+ public void Setup()
+ {
+            // Setup Split Memory implementation
+            _splitDataMemory = new HeapResizableDirectMemory();
+            _splitIndexMemory = new HeapResizableDirectMemory();
+            _splitMemoryLinks = new SplitMemoryLinks<TLinkAddress>(_splitDataMemory, _splitIndexMemory,
+                SplitMemoryLinks<TLinkAddress>.DefaultLinksSizeStep)
+                .DecorateWithAutomaticUniquenessAndUsagesResolution();
+
+            // Setup United Memory implementation
+            _unitedMemory = new HeapResizableDirectMemory();
+            _unitedMemoryLinks = new UnitedMemoryLinks<TLinkAddress>(_unitedMemory, UnitedMemoryLinks<TLinkAddress>.DefaultLinksSizeStep,
+                Platform.Singletons.Default<LinksConstants<TLinkAddress>>.Instance, IndexTreeType.SizeBalancedTree)
+                .DecorateWithAutomaticUniquenessAndUsagesResolution();
+ }
+
+ [GlobalCleanup]
+ public void Cleanup()
+ {
+ _splitDataMemory?.Dispose();
+ _splitIndexMemory?.Dispose();
+ _unitedMemory?.Dispose();
+ }
+
+ [Benchmark]
+ public TLinkAddress SplitMemory_CreateLinks()
+ {
+ var links = _splitMemoryLinks;
+ var firstLink = links.CreatePoint();
+ for (int i = 0; i < N; i++)
+ {
+ var link = links.Create();
+ links.Update(link, firstLink, link);
+ }
+ return firstLink;
+ }
+
+ [Benchmark]
+ public TLinkAddress UnitedMemory_CreateLinks()
+ {
+ var links = _unitedMemoryLinks;
+ var firstLink = links.CreatePoint();
+ for (int i = 0; i < N; i++)
+ {
+ var link = links.Create();
+ links.Update(link, firstLink, link);
+ }
+ return firstLink;
+ }
+
+ [Benchmark]
+ public TLinkAddress SplitMemory_SearchLinks()
+ {
+ var links = _splitMemoryLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ return links.Count(query);
+ }
+
+ [Benchmark]
+ public TLinkAddress UnitedMemory_SearchLinks()
+ {
+ var links = _unitedMemoryLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ return links.Count(query);
+ }
+
+ [Benchmark]
+ public void SplitMemory_UpdateLinks()
+ {
+ var links = _splitMemoryLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ links.Each(link =>
+ {
+ if (link != null && link.Count >= 3)
+ {
+ links.Update(link[0], link[1], link[2]);
+ }
+ return links.Constants.Continue;
+ }, query);
+ }
+
+ [Benchmark]
+ public void UnitedMemory_UpdateLinks()
+ {
+ var links = _unitedMemoryLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ links.Each(link =>
+ {
+ if (link != null && link.Count >= 3)
+ {
+ links.Update(link[0], link[1], link[2]);
+ }
+ return links.Constants.Continue;
+ }, query);
+ }
+
+ [Benchmark]
+ public void SplitMemory_DeleteLinks()
+ {
+ var links = _splitMemoryLinks;
+            var linksToDelete = new List<TLinkAddress>();
+            var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+
+ // Collect first 10 links to delete
+ var count = 0;
+ links.Each(link =>
+ {
+ if (link != null && count < 10)
+ {
+ linksToDelete.Add(link[0]);
+ count++;
+ }
+ return count < 10 ? links.Constants.Continue : links.Constants.Break;
+ }, query);
+
+ // Delete collected links
+ foreach (var linkToDelete in linksToDelete)
+ {
+ if (links.Exists(linkToDelete))
+ {
+ links.Delete(linkToDelete);
+ }
+ }
+ }
+
+ [Benchmark]
+ public void UnitedMemory_DeleteLinks()
+ {
+ var links = _unitedMemoryLinks;
+            var linksToDelete = new List<TLinkAddress>();
+            var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+
+ // Collect first 10 links to delete
+ var count = 0;
+ links.Each(link =>
+ {
+ if (link != null && count < 10)
+ {
+ linksToDelete.Add(link[0]);
+ count++;
+ }
+ return count < 10 ? links.Constants.Continue : links.Constants.Break;
+ }, query);
+
+ // Delete collected links
+ foreach (var linkToDelete in linksToDelete)
+ {
+ if (links.Exists(linkToDelete))
+ {
+ links.Delete(linkToDelete);
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/csharp/Platform.Data.Doublets.Benchmarks/Program.cs b/csharp/Platform.Data.Doublets.Benchmarks/Program.cs
index f987151c6..7074af794 100644
--- a/csharp/Platform.Data.Doublets.Benchmarks/Program.cs
+++ b/csharp/Platform.Data.Doublets.Benchmarks/Program.cs
@@ -1,4 +1,6 @@
using BenchmarkDotNet.Running;
+using BenchmarkDotNet.Configs;
+using BenchmarkDotNet.Columns;
namespace Platform.Data.Doublets.Benchmarks
{
@@ -6,9 +8,22 @@ class Program
{
static void Main()
{
-            BenchmarkRunner.Run<CountBenchmarks>();
-            BenchmarkRunner.Run<LinkStructBenchmarks>();
- // BenchmarkRunner.Run();
+ // Create a custom configuration for better benchmark comparison
+ var config = DefaultConfig.Instance
+ .AddColumn(StatisticColumn.Mean)
+ .AddColumn(StatisticColumn.StdDev)
+ .AddColumn(StatisticColumn.Median);
+
+ // Original benchmarks
+            BenchmarkRunner.Run<CountBenchmarks>(config);
+            BenchmarkRunner.Run<LinkStructBenchmarks>(config);
+
+ // New comparison benchmarks for experimental code
+            BenchmarkRunner.Run<TreeImplementationsBenchmarks>(config);
+            BenchmarkRunner.Run<MemoryImplementationsBenchmarks>(config);
+            BenchmarkRunner.Run<ExperimentalAlgorithmsBenchmarks>(config);
+
+ // BenchmarkRunner.Run(config);
}
}
}
diff --git a/csharp/Platform.Data.Doublets.Benchmarks/README.md b/csharp/Platform.Data.Doublets.Benchmarks/README.md
new file mode 100644
index 000000000..c226d3df8
--- /dev/null
+++ b/csharp/Platform.Data.Doublets.Benchmarks/README.md
@@ -0,0 +1,88 @@
+# Platform.Data.Doublets.Benchmarks
+
+This project contains comprehensive benchmarks for comparing different implementations and algorithms in the Platform.Data.Doublets library using BenchmarkDotNet.
+
+## Benchmark Categories
+
+### Original Benchmarks
+
+- **CountBenchmarks**: Compares different collection approaches (Array, List, ListWithCapacity) for link data gathering
+- **LinkStructBenchmarks**: Compares performance between Link struct, array, and list implementations
+
+### New Comparison Benchmarks (Issue #100)
+
+The following benchmarks were added to fulfill the requirement of using BenchmarkDotNet for comparison of experimental code and similar versions of algorithms:
+
+#### 1. TreeImplementationsBenchmarks
+Compares different tree implementation algorithms for the same operations:
+- **SizeBalancedTree**: Traditional size-balanced tree implementation
+- **RecursionlessSizeBalancedTree**: Iterative version avoiding recursion
+- **SizedAndThreadedAVLBalancedTree**: AVL-balanced tree with size and threading optimizations
+
+Operations tested:
+- Link creation and insertion
+- Link searching and counting
+- Link enumeration (Each method)
+
+#### 2. MemoryImplementationsBenchmarks
+Compares Split vs United memory implementation approaches:
+- **SplitMemoryLinks**: Uses separate data and index memory regions
+- **UnitedMemoryLinks**: Uses unified memory layout
+
+Operations tested:
+- Link creation
+- Link searching
+- Link updates
+- Link deletion
+
+#### 3. ExperimentalAlgorithmsBenchmarks
+Demonstrates different experimental approaches to the same problem - link data collection:
+- **V1_PreallocatedArray**: Memory-efficient with exact size pre-calculation
+- **V2_DynamicList**: Simple dynamic approach with resizing
+- **V3_PreallocatedList**: Pre-calculated capacity with List flexibility
+- **V4_LinqBased**: LINQ-based functional approach
+- **V5_BatchProcessing**: Batch processing for better cache locality
+- **V6_PooledArrays**: Memory-optimized using pooled temporary storage
+
+## Usage
+
+### Running All Benchmarks
+```bash
+cd csharp
+dotnet run --project Platform.Data.Doublets.Benchmarks -c Release
+```
+
+### Running Specific Benchmarks
+You can modify `Program.cs` to run only specific benchmark classes by commenting out the ones you don't want to run.
+
+## Configuration
+
+The benchmarks are configured with:
+- **SimpleJob**: Basic job configuration
+- **MemoryDiagnoser**: Tracks memory allocations and GC pressure
+- **Statistical Columns**: Mean, Standard Deviation, and Median for better comparison
+
+## Parameters
+
+Most benchmarks use parameterized testing with different data sizes:
+- Small: 100 items
+- Medium: 1,000 items
+- Large: 10,000 items (where applicable)
+
+## Output
+
+BenchmarkDotNet generates detailed reports including:
+- Execution time statistics
+- Memory allocation metrics
+- GC collection statistics
+- Statistical analysis (mean, standard deviation, median)
+
+## Purpose
+
+These benchmarks fulfill GitHub issue #100's requirement to "Use BenchmarkDotNet for comparison of experimental code (comparison of similar version of the same algorithms)". They provide quantitative performance data to compare:
+
+1. **Different implementations** of the same algorithm (tree types)
+2. **Different architectural approaches** (memory layouts)
+3. **Multiple experimental solutions** to the same problem (data collection strategies)
+
+This enables data-driven decisions when choosing between implementation alternatives and validates performance improvements or regressions in experimental code.
\ No newline at end of file
diff --git a/csharp/Platform.Data.Doublets.Benchmarks/TreeImplementationsBenchmarks.cs b/csharp/Platform.Data.Doublets.Benchmarks/TreeImplementationsBenchmarks.cs
new file mode 100644
index 000000000..f1ec0b5a5
--- /dev/null
+++ b/csharp/Platform.Data.Doublets.Benchmarks/TreeImplementationsBenchmarks.cs
@@ -0,0 +1,159 @@
+using System.Collections.Generic;
+using BenchmarkDotNet.Attributes;
+using Platform.Data.Doublets.Memory;
+using Platform.Data.Doublets.Memory.United.Generic;
+using Platform.Memory;
+using TLinkAddress = System.UInt64;
+
+#pragma warning disable CA1822 // Mark members as static
+
+namespace Platform.Data.Doublets.Benchmarks
+{
+    /// <summary>
+    /// Benchmarks comparing different tree implementation algorithms for the same operations.
+    /// This benchmark compares SizeBalancedTree, RecursionlessSizeBalancedTree, and SizedAndThreadedAVLBalancedTree implementations.
+    /// </summary>
+    [SimpleJob]
+    [MemoryDiagnoser]
+    public class TreeImplementationsBenchmarks
+    {
+        private ILinks<TLinkAddress> _sizeBalancedTreeLinks;
+        private ILinks<TLinkAddress> _recursionlessSizeBalancedTreeLinks;
+        private ILinks<TLinkAddress> _avlBalancedTreeLinks;
+        private HeapResizableDirectMemory _sbtMemory;
+        private HeapResizableDirectMemory _rsbtMemory;
+        private HeapResizableDirectMemory _avlMemory;
+
+        [Params(100, 1000, 10000)]
+        public int N;
+
+ [GlobalSetup]
+ public void Setup()
+ {
+            // Setup Size Balanced Tree implementation
+            _sbtMemory = new HeapResizableDirectMemory();
+            _sizeBalancedTreeLinks = new UnitedMemoryLinks<TLinkAddress>(_sbtMemory, UnitedMemoryLinks<TLinkAddress>.DefaultLinksSizeStep,
+                Platform.Singletons.Default<LinksConstants<TLinkAddress>>.Instance, IndexTreeType.SizeBalancedTree)
+                .DecorateWithAutomaticUniquenessAndUsagesResolution();
+
+            // Setup Recursionless Size Balanced Tree implementation
+            _rsbtMemory = new HeapResizableDirectMemory();
+            _recursionlessSizeBalancedTreeLinks = new UnitedMemoryLinks<TLinkAddress>(_rsbtMemory, UnitedMemoryLinks<TLinkAddress>.DefaultLinksSizeStep,
+                Platform.Singletons.Default<LinksConstants<TLinkAddress>>.Instance, IndexTreeType.RecursionlessSizeBalancedTree)
+                .DecorateWithAutomaticUniquenessAndUsagesResolution();
+
+            // Setup AVL Balanced Tree implementation
+            _avlMemory = new HeapResizableDirectMemory();
+            _avlBalancedTreeLinks = new UnitedMemoryLinks<TLinkAddress>(_avlMemory, UnitedMemoryLinks<TLinkAddress>.DefaultLinksSizeStep,
+                Platform.Singletons.Default<LinksConstants<TLinkAddress>>.Instance, IndexTreeType.SizedAndThreadedAVLBalancedTree)
+                .DecorateWithAutomaticUniquenessAndUsagesResolution();
+ }
+
+ [GlobalCleanup]
+ public void Cleanup()
+ {
+ _sbtMemory?.Dispose();
+ _rsbtMemory?.Dispose();
+ _avlMemory?.Dispose();
+ }
+
+ [Benchmark]
+ public TLinkAddress SizeBalancedTree_CreateLinks()
+ {
+ var links = _sizeBalancedTreeLinks;
+ var firstLink = links.CreatePoint();
+ for (int i = 0; i < N; i++)
+ {
+ var link = links.Create();
+ links.Update(link, firstLink, link);
+ }
+ return firstLink;
+ }
+
+ [Benchmark]
+ public TLinkAddress RecursionlessSizeBalancedTree_CreateLinks()
+ {
+ var links = _recursionlessSizeBalancedTreeLinks;
+ var firstLink = links.CreatePoint();
+ for (int i = 0; i < N; i++)
+ {
+ var link = links.Create();
+ links.Update(link, firstLink, link);
+ }
+ return firstLink;
+ }
+
+ [Benchmark]
+ public TLinkAddress AVLBalancedTree_CreateLinks()
+ {
+ var links = _avlBalancedTreeLinks;
+ var firstLink = links.CreatePoint();
+ for (int i = 0; i < N; i++)
+ {
+ var link = links.Create();
+ links.Update(link, firstLink, link);
+ }
+ return firstLink;
+ }
+
+ [Benchmark]
+ public TLinkAddress SizeBalancedTree_SearchLinks()
+ {
+ var links = _sizeBalancedTreeLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ return links.Count(query);
+ }
+
+ [Benchmark]
+ public TLinkAddress RecursionlessSizeBalancedTree_SearchLinks()
+ {
+ var links = _recursionlessSizeBalancedTreeLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ return links.Count(query);
+ }
+
+ [Benchmark]
+ public TLinkAddress AVLBalancedTree_SearchLinks()
+ {
+ var links = _avlBalancedTreeLinks;
+ var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+ return links.Count(query);
+ }
+
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> SizeBalancedTree_EachLinks()
+        {
+            var links = _sizeBalancedTreeLinks;
+            var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+            var results = new List<IList<TLinkAddress>?>();
+            links.Each(linkResult => { results.Add(linkResult); return links.Constants.Continue; }, query);
+            return results;
+        }
+
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> RecursionlessSizeBalancedTree_EachLinks()
+        {
+            var links = _recursionlessSizeBalancedTreeLinks;
+            var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+            var results = new List<IList<TLinkAddress>?>();
+            links.Each(linkResult => { results.Add(linkResult); return links.Constants.Continue; }, query);
+            return results;
+        }
+
+        [Benchmark]
+        public IList<IList<TLinkAddress>?> AVLBalancedTree_EachLinks()
+        {
+            var links = _avlBalancedTreeLinks;
+            var any = links.Constants.Any;
+            var query = new Link<TLinkAddress>(any, 1UL, any);
+            var results = new List<IList<TLinkAddress>?>();
+            links.Each(linkResult => { results.Add(linkResult); return links.Constants.Continue; }, query);
+            return results;
+        }
+ }
+}
\ No newline at end of file