author    Chris Packham <judge.packham@gmail.com>  2023-09-13 17:10:20 +1200
committer Chris Packham <judge.packham@gmail.com>  2023-09-17 15:41:50 +1200
commit    c175b21ce470a07875a5db86f21439f02f93df0e (patch)
tree      fb9e2da6156e247142f9c67fe21edd5a5978f66a /packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch
parent    977ed69427889a09f8e77ff9b60a6d50a18d8417 (diff)
gcc: Bring in upstream fixes for CVE-2023-4039
Bring in the fixes for GCC 7 through 13.

https://rtx.meta.security/mitigation/2023/09/12/CVE-2023-4039.html
https://developer.arm.com/Arm%20Security%20Center/GCC%20Stack%20Protector%20Vulnerability%20AArch64

Signed-off-by: Chris Packham <judge.packham@gmail.com>
Diffstat (limited to 'packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch')
-rw-r--r--  packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch b/packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch
new file mode 100644
index 00000000..321a0022
--- /dev/null
+++ b/packages/gcc/9.5.0/0029-aarch64-Simplify-probe-of-final-frame-allocation.patch
@@ -0,0 +1,111 @@
+From f2684e63652bb251d22c79e40081c646df1f36b6 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 8 Aug 2023 01:57:26 +0100
+Subject: [PATCH 29/30] aarch64: Simplify probe of final frame allocation
+
+Previous patches ensured that the final frame allocation only needs
+a probe when the size is strictly greater than 1KiB. It's therefore
+safe to use the normal 1024 probe offset in all cases.
+
+The main motivation for doing this is to simplify the code and
+reduce the number of special cases.
+
+gcc/
+ * config/aarch64/aarch64.c (aarch64_allocate_and_probe_stack_space):
+ Always probe the residual allocation at offset 1024, asserting
+ that that is in range.
+
+gcc/testsuite/
+ * gcc.target/aarch64/stack-check-prologue-17.c: Expect the probe
+ to be at offset 1024 rather than offset 0.
+ * gcc.target/aarch64/stack-check-prologue-18.c: Likewise.
+---
+ gcc/config/aarch64/aarch64.c | 12 ++++--------
+ .../gcc.target/aarch64/stack-check-prologue-17.c | 2 +-
+ .../gcc.target/aarch64/stack-check-prologue-18.c | 7 +++++--
+ 3 files changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
+index 1e8467fdd03f..705f719a2eaa 100644
+--- a/gcc/config/aarch64/aarch64.c
++++ b/gcc/config/aarch64/aarch64.c
+@@ -5695,16 +5695,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ are still safe. */
+ if (residual)
+ {
+- HOST_WIDE_INT residual_probe_offset = guard_used_by_caller;
++ gcc_assert (guard_used_by_caller + byte_sp_alignment <= size);
++
+ /* If we're doing final adjustments, and we've done any full page
+ allocations then any residual needs to be probed. */
+ if (final_adjustment_p && rounded_size != 0)
+ min_probe_threshold = 0;
+- /* If doing a small final adjustment, we always probe at offset 0.
+- This is done to avoid issues when the final adjustment is smaller
+- than the probing offset. */
+- else if (final_adjustment_p && rounded_size == 0)
+- residual_probe_offset = 0;
+
+ aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
+ if (residual >= min_probe_threshold)
+@@ -5715,8 +5711,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+ HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
+ "\n", residual);
+
+- emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+- residual_probe_offset));
++ emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
++ guard_used_by_caller));
+ emit_insn (gen_blockage ());
+ }
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+index 0d8a25d73a24..f0ec1389771d 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
+@@ -33,7 +33,7 @@ int test1(int z) {
+ ** ...
+ ** str x30, \[sp\]
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+diff --git a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+index 82447d20fff5..71d33ba34e9e 100644
+--- a/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
++++ b/gcc/testsuite/gcc.target/aarch64/stack-check-prologue-18.c
+@@ -8,8 +8,9 @@ void g();
+ ** test1:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #4064
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -49,8 +50,9 @@ int test1(int z) {
+ ** test2:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1040
+-** str xzr, \[sp\]
++** str xzr, \[sp, #?1024\]
+ ** cbnz w0, .*
+ ** bl g
+ ** ...
+@@ -77,6 +79,7 @@ int test2(int z) {
+ ** test3:
+ ** ...
+ ** str x30, \[sp\]
++** ...
+ ** sub sp, sp, #1024
+ ** cbnz w0, .*
+ ** bl g
+--
+2.42.0
+
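
The rationale in the patch above is that the residual (final) frame allocation only needs a probe when it is strictly greater than the 1 KiB guard the caller may already rely on, and since AArch64 stack allocations are multiples of the 16-byte stack alignment, a probe at the fixed offset 1024 always lands inside the bytes just allocated. The following standalone C sketch is not part of the patch or of GCC; it only models that invariant under the assumed defaults (1 KiB caller guard, 16-byte alignment), and the constants and helper name are hypothetical.

/* Minimal sketch (not GCC code): why a fixed probe offset of 1024 is
   safe for the residual stack allocation once a probe is required.  */
#include <assert.h>
#include <stdio.h>

#define GUARD_USED_BY_CALLER 1024  /* stack-clash guard the caller may have used */
#define BYTE_SP_ALIGNMENT      16  /* AArch64 stack pointer alignment */

/* When the residual allocation exceeds the caller guard, it is a
   multiple of the stack alignment and therefore at least
   guard + alignment bytes, so a store at [sp, #1024] falls inside
   the newly allocated region.  */
static void probe_residual (long residual)
{
  assert (residual % BYTE_SP_ALIGNMENT == 0);
  if (residual > GUARD_USED_BY_CALLER)
    {
      /* Mirrors the invariant asserted in the patched
         aarch64_allocate_and_probe_stack_space.  */
      assert (GUARD_USED_BY_CALLER + BYTE_SP_ALIGNMENT <= residual);
      printf ("sub sp, sp, #%ld ; str xzr, [sp, #%d]\n",
              residual, GUARD_USED_BY_CALLER);
    }
  else
    printf ("sub sp, sp, #%ld ; no probe needed\n", residual);
}

int main (void)
{
  probe_residual (1024);  /* exactly the guard: no probe required */
  probe_residual (1040);  /* as in stack-check-prologue-17.c: probe at #1024 */
  probe_residual (4064);  /* as in stack-check-prologue-18.c: probe at #1024 */
  return 0;
}

The example residual sizes 1040 and 4064 match the `sub sp, sp, #...` adjustments checked by the updated testsuite expectations above, which now look for the probe at `[sp, #?1024]` instead of `[sp]`.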