diff --git a/common/src/main/java/com/viaversion/viarewind/api/compression/ThreadLocalCompressionProvider.java b/common/src/main/java/com/viaversion/viarewind/api/compression/ThreadLocalCompressionProvider.java
new file mode 100644
index 000000000..728cbaf1f
--- /dev/null
+++ b/common/src/main/java/com/viaversion/viarewind/api/compression/ThreadLocalCompressionProvider.java
@@ -0,0 +1,157 @@
+/*
+ * This file is part of ViaRewind - https://github.com/ViaVersion/ViaRewind
+ * Copyright (C) 2018-2026 ViaVersion and contributors
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+package com.viaversion.viarewind.api.compression;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufAllocator;
+
+import java.util.zip.DataFormatException;
+import java.util.zip.Deflater;
+import java.util.zip.Inflater;
+
+/**
+ * A ThreadLocal-based compression provider that either uses Velocity's native compression
+ * or falls back to Java's built-in compression (Deflater/Inflater).
+ */
+public final class ThreadLocalCompressionProvider {
+
+    private static final boolean VELOCITY_NATIVES_AVAILABLE;
+
+    static {
+        boolean velocityAvailable = false;
+        try {
+            Class.forName("com.velocitypowered.natives.compression.VelocityCompressor");
+            velocityAvailable = true;
+        } catch (final ClassNotFoundException ignored) { // natives absent -> fall back to java.util.zip
+        }
+        VELOCITY_NATIVES_AVAILABLE = velocityAvailable;
+    }
+
+    // ThreadLocal for Java's Deflater (used when Velocity natives are not available)
+    private static final ThreadLocal<Deflater> JAVA_DEFLATER = ThreadLocal.withInitial(Deflater::new);
+
+    // ThreadLocal for Java's Inflater (used when Velocity natives are not available)
+    private static final ThreadLocal<Inflater> JAVA_INFLATER = ThreadLocal.withInitial(Inflater::new);
+
+    private ThreadLocalCompressionProvider() { // utility class, not instantiable
+    }
+
+    /**
+     * Compresses data from the source buffer into the destination buffer.
+     *
+     * @param source the source buffer containing uncompressed data
+     * @param destination the destination buffer to write compressed data to
+     * @throws DataFormatException if compression fails
+     */
+    public static void deflate(final ByteBuf source, final ByteBuf destination) throws DataFormatException {
+        if (VELOCITY_NATIVES_AVAILABLE) {
+            deflateVelocity(source, destination);
+        } else {
+            deflateJava(source, destination);
+        }
+    }
+
+
+    /**
+     * Decompresses data from the source buffer into the destination buffer.
+     *
+     * @param source the source buffer containing compressed data
+     * @param destination the destination buffer to write decompressed data to
+     * @param expectedSize the expected size of the decompressed data
+     * @throws DataFormatException if decompression fails
+     */
+    public static void inflate(final ByteBuf source, final ByteBuf destination, final int expectedSize) throws DataFormatException {
+        if (VELOCITY_NATIVES_AVAILABLE) {
+            inflateVelocity(source, destination, expectedSize);
+        } else {
+            inflateJava(source, destination, expectedSize);
+        }
+    }
+
+    // Java fallback implementation (java.util.zip)
+
+    private static void deflateJava(final ByteBuf source, final ByteBuf destination) throws DataFormatException {
+        ByteBuf temp = source;
+        if (!source.hasArray()) { // Deflater needs a backing array; copy direct buffers to the heap
+            temp = ByteBufAllocator.DEFAULT.heapBuffer().writeBytes(source);
+        } else {
+            source.retain(); // balance the unconditional temp.release() in the finally block
+        }
+        ByteBuf output = ByteBufAllocator.DEFAULT.heapBuffer();
+        try {
+            final Deflater deflater = JAVA_DEFLATER.get();
+            deflater.setInput(temp.array(), temp.arrayOffset() + temp.readerIndex(), temp.readableBytes());
+            deflater.finish();
+
+            while (!deflater.finished()) {
+                output.ensureWritable(4096);
+                output.writerIndex(output.writerIndex() + deflater.deflate(output.array(), output.arrayOffset() + output.writerIndex(), output.writableBytes()));
+            }
+            destination.writeBytes(output);
+        } finally {
+            output.release();
+            temp.release();
+            JAVA_DEFLATER.get().reset(); // reset so the thread-local instance is reusable
+        }
+    }
+
+
+    private static void inflateJava(final ByteBuf source, final ByteBuf destination, final int expectedSize) throws DataFormatException {
+        ByteBuf temp = source;
+        if (!source.hasArray()) { // Inflater needs a backing array; copy direct buffers to the heap
+            temp = ByteBufAllocator.DEFAULT.heapBuffer().writeBytes(source);
+        } else {
+            source.retain(); // balance the unconditional temp.release() in the finally block
+        }
+        ByteBuf output = ByteBufAllocator.DEFAULT.heapBuffer(expectedSize, expectedSize);
+        try {
+            final Inflater inflater = JAVA_INFLATER.get();
+            inflater.setInput(temp.array(), temp.arrayOffset() + temp.readerIndex(), temp.readableBytes());
+            output.writerIndex(output.writerIndex() + inflater.inflate(output.array(), output.arrayOffset(), expectedSize)); // NOTE(review): a single inflate() call may return fewer than expectedSize bytes; consider looping until inflater.finished() — confirm against callers
+            destination.writeBytes(output);
+        } finally {
+            output.release();
+            temp.release();
+            JAVA_INFLATER.get().reset(); // reset so the thread-local instance is reusable
+        }
+    }
+
+    // Velocity native implementation (only referenced when natives are on the classpath)
+    private static void deflateVelocity(final ByteBuf source, final ByteBuf destination) throws DataFormatException {
+        VelocityHolder.deflate(source, destination);
+    }
+
+    private static void inflateVelocity(final ByteBuf source, final ByteBuf destination, final int expectedSize) throws DataFormatException {
+        VelocityHolder.inflate(source, destination, expectedSize);
+    }
+
+    // Holder class defers loading of Velocity classes until first use, so the outer class
+    // can be loaded even when the natives are absent.
+    private static final class VelocityHolder {
+
+        private static final ThreadLocal<com.velocitypowered.natives.compression.VelocityCompressor> COMPRESSOR =
+            ThreadLocal.withInitial(() -> com.velocitypowered.natives.util.Natives.compress.get().create(-1)); // -1 = default compression level
+
+        static void deflate(final ByteBuf source, final ByteBuf destination) throws DataFormatException {
+            COMPRESSOR.get().deflate(source, destination);
+        }
+
+        static void inflate(final ByteBuf source, final ByteBuf destination, final int expectedSize) throws DataFormatException {
+            COMPRESSOR.get().inflate(source, destination, expectedSize);
+        }
+    }
+}
+
diff --git a/common/src/main/java/com/viaversion/viarewind/api/type/chunk/BulkChunkType1_7_6.java b/common/src/main/java/com/viaversion/viarewind/api/type/chunk/BulkChunkType1_7_6.java
index 9b5de5c95..3f451aeb3 100644
--- a/common/src/main/java/com/viaversion/viarewind/api/type/chunk/BulkChunkType1_7_6.java
+++ b/common/src/main/java/com/viaversion/viarewind/api/type/chunk/BulkChunkType1_7_6.java
@@ -17,11 +17,13 @@
*/
package com.viaversion.viarewind.api.type.chunk;
+import com.viaversion.viarewind.api.compression.ThreadLocalCompressionProvider;
import com.viaversion.viaversion.api.minecraft.chunks.Chunk;
import com.viaversion.viaversion.api.type.Type;
import io.netty.buffer.ByteBuf;
-import java.util.zip.Deflater;
+import java.util.zip.DataFormatException;
+
 public class BulkChunkType1_7_6 extends Type<Chunk[]> {
@@ -63,41 +65,48 @@ public void write(ByteBuf buffer, Chunk[] chunks) {
);
}
- final byte[] data = new byte[totalSize];
- int offset = 0;
+ final ByteBuf uncompressed = buffer.alloc().buffer(totalSize);
- for (int i = 0; i < chunkCount; i++) {
- Chunk chunk = chunks[i];
- boolean biomes = chunk.isFullChunk() && chunk.getBiomeData() != null;
+ try {
+ for (int i = 0; i < chunkCount; i++) {
+ Chunk chunk = chunks[i];
+ boolean biomes = chunk.isFullChunk() && chunk.getBiomeData() != null;
+
+ ChunkType1_7_6.serialize(
+ chunk,
+ uncompressed,
+ addBitMasks[i],
+ anySkyLight,
+ biomes
+ );
+ }
- offset = ChunkType1_7_6.serialize(
- chunk,
- data,
- offset,
- addBitMasks[i],
- anySkyLight,
- biomes
- );
- }
+ buffer.writeShort(chunkCount);
- buffer.writeShort(chunkCount);
+ // Reserve 4 bytes for the compressed size
+ final int sizeIndex = buffer.writerIndex();
+ buffer.writeInt(0); // Placeholder for compressed size
- final Deflater deflater = new Deflater();
- byte[] compressedData;
- int compressedSize;
- try {
- deflater.setInput(data, 0, data.length);
- deflater.finish();
- compressedData = new byte[data.length];
- compressedSize = deflater.deflate(compressedData);
+ buffer.writeBoolean(anySkyLight);
+
+ // Write compressed data directly to output buffer
+ final int compressedStart = buffer.writerIndex();
+ try {
+ ThreadLocalCompressionProvider.deflate(uncompressed, buffer);
+ } catch (DataFormatException e) {
+ throw new RuntimeException("Failed to compress bulk chunk data", e);
+ }
+ final int compressedSize = buffer.writerIndex() - compressedStart;
+
+ // Go back and write the compressed size
+ final int endIndex = buffer.writerIndex();
+ buffer.writerIndex(sizeIndex);
+ buffer.writeInt(compressedSize);
+ buffer.writerIndex(endIndex);
} finally {
- deflater.end();
+ uncompressed.release();
}
- buffer.writeInt(compressedSize);
- buffer.writeBoolean(anySkyLight);
- buffer.writeBytes(compressedData, 0, compressedSize);
-
for (int i = 0; i < chunkCount; i++) {
Chunk chunk = chunks[i];
buffer.writeInt(chunk.getX());
diff --git a/common/src/main/java/com/viaversion/viarewind/api/type/chunk/ChunkType1_7_6.java b/common/src/main/java/com/viaversion/viarewind/api/type/chunk/ChunkType1_7_6.java
index 3d30b133e..0e723b936 100644
--- a/common/src/main/java/com/viaversion/viarewind/api/type/chunk/ChunkType1_7_6.java
+++ b/common/src/main/java/com/viaversion/viarewind/api/type/chunk/ChunkType1_7_6.java
@@ -17,13 +17,15 @@
*/
package com.viaversion.viarewind.api.type.chunk;
+import com.viaversion.viarewind.api.compression.ThreadLocalCompressionProvider;
import com.viaversion.viaversion.api.minecraft.chunks.Chunk;
import com.viaversion.viaversion.api.minecraft.chunks.ChunkSection;
import com.viaversion.viaversion.api.minecraft.chunks.DataPalette;
import com.viaversion.viaversion.api.minecraft.chunks.PaletteType;
import com.viaversion.viaversion.api.type.Type;
import io.netty.buffer.ByteBuf;
-import java.util.zip.Deflater;
+
+import java.util.zip.DataFormatException;
import static com.viaversion.viaversion.api.minecraft.chunks.ChunkSection.SIZE;
import static com.viaversion.viaversion.api.minecraft.chunks.ChunkSectionLight.LIGHT_LENGTH;
@@ -49,33 +51,41 @@ public void write(ByteBuf buffer, Chunk chunk) {
final boolean biomes = chunk.isFullChunk() && chunk.getBiomeData() != null;
final int size = calcSize(bitmask, addBitmask, hasSkyLight, biomes);
- final byte[] data = new byte[size];
-
- serialize(chunk, data, 0, addBitmask, hasSkyLight, biomes);
+ final ByteBuf uncompressed = buffer.alloc().buffer(size);
- buffer.writeInt(chunk.getX());
- buffer.writeInt(chunk.getZ());
- buffer.writeBoolean(chunk.isFullChunk());
- buffer.writeShort(bitmask);
- buffer.writeShort(addBitmask);
-
- final Deflater deflater = new Deflater();
- byte[] compressedData;
- int compressedSize;
try {
- deflater.setInput(data, 0, data.length);
- deflater.finish();
- compressedData = new byte[data.length];
- compressedSize = deflater.deflate(compressedData);
+ serialize(chunk, uncompressed, addBitmask, hasSkyLight, biomes);
+
+ buffer.writeInt(chunk.getX());
+ buffer.writeInt(chunk.getZ());
+ buffer.writeBoolean(chunk.isFullChunk());
+ buffer.writeShort(bitmask);
+ buffer.writeShort(addBitmask);
+
+ // Reserve 4 bytes for the compressed size
+ final int sizeIndex = buffer.writerIndex();
+ buffer.writeInt(0); // Placeholder for compressed size
+
+ // Write compressed data directly to output buffer
+ final int compressedStart = buffer.writerIndex();
+ try {
+ ThreadLocalCompressionProvider.deflate(uncompressed, buffer);
+ } catch (DataFormatException e) {
+ throw new RuntimeException("Failed to compress chunk data", e);
+ }
+ final int compressedSize = buffer.writerIndex() - compressedStart;
+
+ // Go back and write the compressed size
+ final int endIndex = buffer.writerIndex();
+ buffer.writerIndex(sizeIndex);
+ buffer.writeInt(compressedSize);
+ buffer.writerIndex(endIndex);
} finally {
- deflater.end();
+ uncompressed.release();
}
-
- buffer.writeInt(compressedSize);
- buffer.writeBytes(compressedData, 0, compressedSize);
}
- public static int serialize(Chunk chunk, byte[] output, int offset, int addBitmask, boolean writeSkyLight, boolean biomes) {
+ public static void serialize(Chunk chunk, ByteBuf output, int addBitmask, boolean writeSkyLight, boolean biomes) {
final ChunkSection[] sections = chunk.getSections();
final int bitmask = chunk.getBitmask();
@@ -85,7 +95,7 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
final DataPalette palette = section.palette(PaletteType.BLOCKS);
for (int j = 0; j < SIZE; j++) {
final int block = palette.idAt(j);
- output[offset++] = (byte) ((block >> 4) & 0xFF);
+ output.writeByte((block >> 4) & 0xFF);
}
}
}
@@ -97,7 +107,7 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
for (int j = 0; j < ChunkSection.SIZE; j += 2) {
final int meta1 = palette.idAt(j) & 0xF;
final int meta2 = palette.idAt(j + 1) & 0xF;
- output[offset++] = (byte) (meta1 | (meta2 << 4));
+ output.writeByte(meta1 | (meta2 << 4));
}
}
}
@@ -105,8 +115,7 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
for (int i = 0; i < 16; i++) {
if ((bitmask & (1 << i)) != 0) {
final byte[] blockLight = sections[i].getLight().getBlockLight();
- System.arraycopy(blockLight, 0, output, offset, LIGHT_LENGTH);
- offset += LIGHT_LENGTH;
+ output.writeBytes(blockLight);
}
}
@@ -115,9 +124,11 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
if ((bitmask & (1 << i)) != 0) {
if (sections[i].getLight().hasSkyLight()) {
final byte[] skyLight = sections[i].getLight().getSkyLight();
- System.arraycopy(skyLight, 0, output, offset, LIGHT_LENGTH);
+ output.writeBytes(skyLight);
+ } else {
+ // Write empty skylight data
+ output.writeZero(LIGHT_LENGTH);
}
- offset += LIGHT_LENGTH;
}
}
}
@@ -130,7 +141,7 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
for (int j = 0; j < SIZE; j += 2) {
final int add1 = (palette.idAt(j) >> 12) & 0xF;
final int add2 = (palette.idAt(j + 1) >> 12) & 0xF;
- output[offset++] = (byte) (add1 | (add2 << 4));
+ output.writeByte(add1 | (add2 << 4));
}
}
}
@@ -139,11 +150,9 @@ public static int serialize(Chunk chunk, byte[] output, int offset, int addBitma
if (biomes && chunk.getBiomeData() != null) {
final int[] biomeData = chunk.getBiomeData();
for (int biome : biomeData) {
- output[offset++] = (byte) biome;
+ output.writeByte(biome);
}
}
-
- return offset;
}
public static int calcSize(int bitmask, int addBitmask, boolean hasSkyLight, boolean biomes) {
diff --git a/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/CompressionHandlerProvider.java b/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/CompressionHandlerProvider.java
index ed33cb105..f1acbb1ff 100644
--- a/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/CompressionHandlerProvider.java
+++ b/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/CompressionHandlerProvider.java
@@ -20,7 +20,6 @@
import com.viaversion.viarewind.protocol.v1_8to1_7_6_10.storage.CompressionStatusTracker;
import com.viaversion.viaversion.api.connection.UserConnection;
import com.viaversion.viaversion.api.platform.providers.Provider;
-import io.netty.channel.ChannelHandler;
public abstract class CompressionHandlerProvider implements Provider {
@@ -28,23 +27,6 @@ public abstract class CompressionHandlerProvider implements Provider {
public abstract void onTransformPacket(UserConnection user);
- /**
- * Creates the compression encoder
- *
- * @param compressor A nullable object that may present a native backend for compression (if available)
- * @param threshold The compression threshold
- * @return The encoder
- */
- public abstract ChannelHandler getEncoder(Object compressor, int threshold);
-
- /**
- * Creates the compression decoder
- *
- * @param compressor A nullable object that may present a native backend for compression (if available)
- * @param threshold The compression threshold
- * @return The decoder
- */
- public abstract ChannelHandler getDecoder(Object compressor, int threshold);
public boolean isRemoveCompression(UserConnection user) {
return user.get(CompressionStatusTracker.class).removeCompression;
diff --git a/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/compression/CompressionDecoder.java b/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/compression/CompressionDecoder.java
index 4e280a928..4b9795b9d 100644
--- a/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/compression/CompressionDecoder.java
+++ b/common/src/main/java/com/viaversion/viarewind/protocol/v1_8to1_7_6_10/provider/compression/CompressionDecoder.java
@@ -17,18 +17,15 @@
*/
package com.viaversion.viarewind.protocol.v1_8to1_7_6_10.provider.compression;
+import com.viaversion.viarewind.api.compression.ThreadLocalCompressionProvider;
import com.viaversion.viaversion.api.type.Types;
import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.DecoderException;
import io.netty.handler.codec.MessageToMessageDecoder;
import java.util.List;
-import java.util.zip.DataFormatException;
-import java.util.zip.Inflater;
 public class CompressionDecoder extends MessageToMessageDecoder<ByteBuf> {
- private final Inflater inflater = new Inflater();
private int threshold;
@@ -58,25 +55,12 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List