Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,327 @@
From a664bf3d603dc3bdcf9ae47cc21e0daec706d7a5 Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Thu, 26 Mar 2026 15:30:20 +0900
Subject: [PATCH] crypto: algif_aead - Revert to operating out-of-place

This mostly reverts commit 72548b093ee3 except for the copying of
the associated data.

There is no benefit in operating in-place in algif_aead since the
source and destination come from different mappings. Get rid of
all the complexity added for in-place operation and just copy the
AD directly.

[ Upstream commit a664bf3d603d ]

Backport to 6.12.x stable: rewritten _aead_recvmsg keeps
crypto_aead_copy_sgl(null_tfm, ...) for the AAD copy because 6.12.x
predates the move to memcpy_sglist(). Behaviour matches the upstream
patch: the AEAD operation runs out of place, AAD is copied src->dst,
and the offset / dst_offset parameters are removed from
af_alg_count_tsgl() and af_alg_pull_tsgl().

CVE: CVE-2026-31431
Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst")
Reported-by: Taeyang Lee <0wn@theori.io>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Assisted-by: claude-opus-4-7 [via Claude Code]
---
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -635,15 +635,13 @@
/**
* af_alg_count_tsgl - Count number of TX SG entries
*
- * The counting starts from the beginning of the SGL to @bytes. If
- * an @offset is provided, the counting of the SG entries starts at the @offset.
+ * The counting starts from the beginning of the SGL to @bytes.
*
* @sk: socket of connection to user space
* @bytes: Count the number of SG entries holding given number of bytes.
- * @offset: Start the counting of SG entries from the given offset.
* Return: Number of TX SG entries found given the constraints
*/
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
const struct alg_sock *ask = alg_sk(sk);
const struct af_alg_ctx *ctx = ask->private;
@@ -658,25 +656,11 @@
const struct scatterlist *sg = sgl->sg;

for (i = 0; i < sgl->cur; i++) {
- size_t bytes_count;
-
- /* Skip offset */
- if (offset >= sg[i].length) {
- offset -= sg[i].length;
- bytes -= sg[i].length;
- continue;
- }
-
- bytes_count = sg[i].length - offset;
-
- offset = 0;
sgl_count++;
-
- /* If we have seen requested number of bytes, stop */
- if (bytes_count >= bytes)
+ if (sg[i].length >= bytes)
return sgl_count;

- bytes -= bytes_count;
+ bytes -= sg[i].length;
}
}

@@ -688,19 +672,14 @@
* af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to @dst. The caller must release
- * the pages. If @dst_offset is given only reassign the pages to @dst starting
- * at the @dst_offset (byte). The caller must ensure that @dst is large
- * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ * the pages.
*
* @sk: socket of connection to user space
* @used: Number of bytes to pull from TX SGL
* @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
- * reaching the offset is released.
*/
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset)
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -725,18 +704,10 @@
* SG entries in dst.
*/
if (dst) {
- if (dst_offset >= plen) {
- /* discard page before offset */
- dst_offset -= plen;
- } else {
- /* reassign page to dst after offset */
- get_page(page);
- sg_set_page(dst + j, page,
- plen - dst_offset,
- sg[i].offset + dst_offset);
- dst_offset = 0;
- j++;
- }
+ /* reassign page to dst */
+ get_page(page);
+ sg_set_page(dst + j, page, plen, sg[i].offset);
+ j++;
}

sg[i].length -= plen;
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -26,7 +26,6 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
-#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -96,9 +95,8 @@
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
- unsigned int i, as = crypto_aead_authsize(tfm);
+ unsigned int as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
@@ -178,23 +176,24 @@
outlen -= less;
}

+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
processed = used + ctx->aead_assoclen;
- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
- for (i = 0; i < tsgl->cur; i++) {
- struct scatterlist *process_sg = tsgl->sg + i;
-
- if (!(process_sg->length) || !sg_page(process_sg))
- continue;
- tsgl_src = process_sg;
- break;
- }
- if (tsgl_src)
- break;
- }
- if (processed && !tsgl_src) {
- err = -EFAULT;
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+ areq->tsgl_entries),
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
goto free;
}
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, processed, areq->tsgl);
+ tsgl_src = areq->tsgl;

/*
* Copy of AAD from source to destination
@@ -203,83 +202,19 @@
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
- *
- * To ensure efficiency, the following implementation ensure that the
- * ciphers are invoked to perform a crypto operation in-place. This
- * is achieved by memory management specified as follows.
*/

- /* Use the RX SGL as source (and destination) for crypto op. */
+ /* Use the RX SGL as destination for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sgt.sgl;

- if (ctx->enc) {
- /*
- * Encryption operation - The in-place cipher operation is
- * achieved by the following operation:
- *
- * TX SGL: AAD || PT
- * | |
- * | copy |
- * v v
- * RX SGL: AAD || PT || Tag
- */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- processed);
- if (err)
- goto free;
- af_alg_pull_tsgl(sk, processed, NULL, 0);
- } else {
- /*
- * Decryption operation - To achieve an in-place cipher
- * operation, the following SGL structure is used:
- *
- * TX SGL: AAD || CT || Tag
- * | | ^
- * | copy | | Create SGL link.
- * v v |
- * RX SGL: AAD || CT ----+
- */
-
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl,
- outlen);
- if (err)
- goto free;
-
- /* Create TX SGL for tag and chain it to RX SGL. */
- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
- processed - as);
- if (!areq->tsgl_entries)
- areq->tsgl_entries = 1;
- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
- areq->tsgl_entries),
- GFP_KERNEL);
- if (!areq->tsgl) {
- err = -ENOMEM;
- goto free;
- }
- sg_init_table(areq->tsgl, areq->tsgl_entries);
-
- /* Release TX SGL, except for tag data and reassign tag data. */
- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
-
- /* chain the areq TX SGL holding the tag with RX SGL */
- if (usedpages) {
- /* RX SGL present */
- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
- struct scatterlist *sg = sgl_prev->sgt.sgl;
-
- sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
- sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
- } else
- /* no RX SGL present (e.g. authentication only) */
- rsgl_src = areq->tsgl;
- }
+ /* Copy AAD from src (areq->tsgl) to dst (rsgl_src). */
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
+ ctx->aead_assoclen);
+ if (err)
+ goto free;

/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
+ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -514,7 +449,7 @@
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivlen = crypto_aead_ivsize(tfm);

- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -138,7 +138,7 @@
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
@@ -149,7 +149,7 @@
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+ af_alg_pull_tsgl(sk, len, areq->tsgl);

/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
@@ -363,7 +363,7 @@
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;

- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
if (ctx->state)
sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -230,9 +230,8 @@
return PAGE_SIZE <= af_alg_rcvbuf(sk);
}

-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
--
2.45.0

6 changes: 5 additions & 1 deletion SPECS/linux/v6.12/linux-esx.spec
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
Summary: Kernel
Name: linux-esx
Version: 6.12.78
Release: 3%{?dist}
Release: 4%{?dist}
URL: http://www.kernel.org
Group: System Environment/Kernel
Vendor: VMware, Inc.
Expand Down Expand Up @@ -194,6 +194,8 @@ Patch91: 0001-block-Fix-validation-of-ioprio-level.patch
# CVE: [100..199]
# Fix CVE-2017-1000252
Patch101: KVM-Don-t-accept-obviously-wrong-gsi-values-via-KVM_.patch
# Fix CVE-2026-31431 (Copy.Fail) - algif_aead in-place revert
Patch130: 0001-crypto-algif_aead-CVE-2026-31431-Revert-to-out-of-place.patch

# aarch64 [200..219]
%ifarch aarch64
Expand Down Expand Up @@ -546,6 +548,8 @@ ln -sf linux-%{uname_r}.cfg /boot/photon.cfg
%{_usrsrc}/linux-headers-%{uname_r}

%changelog
* Thu Apr 30 2026 Claude AI bot <noreply@anthropic.com> 6.12.78-4
- Fix CVE-2026-31431: crypto: algif_aead - Revert to operating out-of-place
* Mon Apr 13 2026 Ankit Jain <ankit-aj.jain@broadcom.com> 6.12.78-3
- Enable AMD AI compute module.
* Fri Apr 10 2026 Ajay Kaher <ajay.kaher@broadcom.com> 6.12.78-2
Expand Down
6 changes: 5 additions & 1 deletion SPECS/linux/v6.12/linux.spec
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@
Summary: Kernel
Name: linux
Version: 6.12.78
Release: 4%{?acvp_build:.acvp}%{?kat_build:.kat}%{?dist}
Release: 5%{?acvp_build:.acvp}%{?kat_build:.kat}%{?dist}
URL: http://www.kernel.org/
Group: System Environment/Kernel
Vendor: VMware, Inc.
Expand Down Expand Up @@ -239,6 +239,8 @@ Patch71: 0001-block-Fix-validation-of-ioprio-level.patch
# CVE: [100..199]
# Fix CVE-2017-1000252
Patch101: KVM-Don-t-accept-obviously-wrong-gsi-values-via-KVM_.patch
# Fix CVE-2026-31431 (Copy.Fail) - algif_aead in-place revert
Patch130: 0001-crypto-algif_aead-CVE-2026-31431-Revert-to-out-of-place.patch

%ifarch aarch64
# aarch specific patches [200..219]
Expand Down Expand Up @@ -971,6 +973,8 @@ ln -sf linux-%{uname_r}.cfg /boot/photon.cfg
%endif

%changelog
* Thu Apr 30 2026 Claude AI bot <noreply@anthropic.com> 6.12.78-5
- Fix CVE-2026-31431: crypto: algif_aead - Revert to operating out-of-place
* Mon Apr 27 2026 Ajay Kaher <ajay.kaher@broadcom.com> 6.12.78-4
- Disable CONFIG_PER_VMA_LOCK
* Fri Apr 10 2026 Keerthana K <keerthana.kalyanasundaram@broadcom.com> 6.12.78-3
Expand Down