diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -167,6 +167,9 @@ typedef struct CPUTLBEntryFull {
/* @byte_swap indicates that all accesses use inverted endianness. */
bool byte_swap;
+ /* @force_aligned indicates that all accesses must be aligned. */
+ bool force_aligned;
+
/*
* Allow target-specific additions to this structure.
* This may be used to cache items from the guest cpu
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1146,7 +1146,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
/* Repeat the MMU check and TLB fill on every access. */
address |= TLB_INVALID_MASK;
}
- if (full->byte_swap) {
+ if (full->byte_swap || full->force_aligned) {
address |= TLB_SLOW_PATH;
}
@@ -1944,16 +1944,19 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUTLBEntryFull *full;
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
bool need_swap;
/* For anything that is unaligned, recurse through full_load. */
if ((addr & (size - 1)) != 0) {
+ /* Honor per-page alignment requirements. */
+ if (full->force_aligned) {
+ cpu_unaligned_access(env_cpu(env), addr, access_type,
+ mmu_idx, retaddr);
+ }
goto do_unaligned_access;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
@@ -2349,16 +2352,19 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
/* Handle anything that isn't just a straight memory access. */
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
- CPUTLBEntryFull *full;
+ CPUTLBEntryFull *full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
bool need_swap;
/* For anything that is unaligned, recurse through byte stores. */
if ((addr & (size - 1)) != 0) {
+ /* Honor per-page alignment requirements. */
+ if (full->force_aligned) {
+ cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+ mmu_idx, retaddr);
+ }
goto do_unaligned_access;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
-
/* Handle watchpoints. */
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
/* On watchpoint hit, this will longjmp out. */
Support per-page natural alignment checking. This will be used
by Arm for pages mapped with memory type Device.

Cc: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-defs.h |  3 +++
 accel/tcg/cputlb.c      | 20 +++++++++++++-------
 2 files changed, 16 insertions(+), 7 deletions(-)