author     Chris Packham <judge.packham@gmail.com>    2023-09-13 17:10:20 +1200
committer  Chris Packham <judge.packham@gmail.com>    2023-09-17 15:41:50 +1200
commit     c175b21ce470a07875a5db86f21439f02f93df0e (patch)
tree       fb9e2da6156e247142f9c67fe21edd5a5978f66a /packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch
parent     977ed69427889a09f8e77ff9b60a6d50a18d8417 (diff)
gcc: Bring in upstream fixes for CVE-2023-4039
Bring in the fixes for GCC 7 through 13.

https://rtx.meta.security/mitigation/2023/09/12/CVE-2023-4039.html
https://developer.arm.com/Arm%20Security%20Center/GCC%20Stack%20Protector%20Vulnerability%20AArch64

Signed-off-by: Chris Packham <judge.packham@gmail.com>
Diffstat (limited to 'packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch')
-rw-r--r--  packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch  99
1 file changed, 99 insertions, 0 deletions
diff --git a/packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch b/packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch
new file mode 100644
index 00000000..f3cfd734
--- /dev/null
+++ b/packages/gcc/11.4.0/0026-aarch64-Simplify-probe-of-final-frame-allocation.patch
@@ -0,0 +1,99 @@
+From e932e11c353be52256dd30d30d924f4e834e3ca3 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 12 Sep 2023 16:19:51 +0100
+Subject: [PATCH 26/29] aarch64: Simplify probe of final frame allocation
+
+Previous patches ensured that the final frame allocation only needs
+a probe when the size is strictly greater than 1KiB. It's therefore
+safe to use the normal 1024 probe offset in all cases.
+
+The main motivation for doing this is to simplify the code and
+remove the number of special cases.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Always probe the residual allocation at offset 1024, asserting
+ that that is in range.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
+ to be at offset 1024 rather than offset 0.
+ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 12 ++++--------
+ .../gcc.target/aarch64/stack-check-prologue-17.c | 2 +-
+ .../gcc.target/aarch64/stack-check-prologue-18.c | 4 ++--
+ 3 files changed, 7 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 3f2b10de987d..4b9cd687525e 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -8751,16 +8751,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when the final adjustment is smaller
+- than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -8771,8 +8767,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+index 0d8a25d73a24..f0ec1389771d 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -33,7 +33,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+index 82447d20fff5..6383bec5ebcd 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -9,7 +9,7 @@ void g();
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #4064
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -50,7 +50,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+--
+2.42.0
+
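
The testsuite hunks above show the practical effect of the change: a residual frame allocation larger than 1 KiB is now probed at offset 1024 from the new stack pointer ("str xzr, [sp, #1024]") rather than at offset 0 ("str xzr, [sp]"). The sketch below is a minimal C function of the kind those tests exercise; the file name, function names, buffer size and compiler flags are assumptions modelled on the testsuite options, not values taken from the patch.

/* probe-example.c -- illustrative sketch only, not part of the patch.
 * A frame with a little over 1 KiB of locals forces a residual stack
 * allocation that needs a probe.  Built with stack-clash protection and a
 * reduced guard size, for example:
 *
 *   aarch64-linux-gnu-gcc -O2 -fstack-clash-protection \
 *       --param stack-clash-protection-guard-size=12 -S probe-example.c
 *
 * a GCC with this patch applied should emit the probe in the prologue as
 * "str xzr, [sp, #1024]", where it previously used "str xzr, [sp]".
 */

void g(char *);                 /* external call keeps the buffer live */

int example(int z)
{
  char buf[1040];               /* a little over 1 KiB of locals */
  if (z)
    g(buf);
  return 1;
}

Checking the generated assembly for the "sub sp, sp, #..." / "str xzr, [sp, #1024]" pair mirrors what the updated scan patterns in stack-check-prologue-17.c and stack-check-prologue-18.c now expect.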