@@ -58,7 +58,7 @@ int32_t aarch32_get_branch_offset(uint32_t insn)
* Check the imm signed bit. If the imm is a negative value, we
* have to extend the imm to a full 32 bit negative value.
*/
- if ( imm & BIT(23) )
+ if ( imm & BIT(23, UL) )
imm |= GENMASK(31, 24);
return (int32_t)(imm << 2);
@@ -45,40 +45,40 @@ static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
switch (type) {
case AARCH64_INSN_IMM_26:
- mask = BIT(26) - 1;
+ mask = BIT(26, UL) - 1;
shift = 0;
break;
case AARCH64_INSN_IMM_19:
- mask = BIT(19) - 1;
+ mask = BIT(19, UL) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_16:
- mask = BIT(16) - 1;
+ mask = BIT(16, UL) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_14:
- mask = BIT(14) - 1;
+ mask = BIT(14, UL) - 1;
shift = 5;
break;
case AARCH64_INSN_IMM_12:
- mask = BIT(12) - 1;
+ mask = BIT(12, UL) - 1;
shift = 10;
break;
case AARCH64_INSN_IMM_9:
- mask = BIT(9) - 1;
+ mask = BIT(9, UL) - 1;
shift = 12;
break;
case AARCH64_INSN_IMM_7:
- mask = BIT(7) - 1;
+ mask = BIT(7, UL) - 1;
shift = 15;
break;
case AARCH64_INSN_IMM_6:
case AARCH64_INSN_IMM_S:
- mask = BIT(6) - 1;
+ mask = BIT(6, UL) - 1;
shift = 10;
break;
case AARCH64_INSN_IMM_R:
- mask = BIT(6) - 1;
+ mask = BIT(6, UL) - 1;
shift = 16;
break;
default:
@@ -363,11 +363,12 @@ static int its_map_baser(void __iomem *basereg, uint64_t regc,
* attributes), retrying if necessary.
*/
retry:
- table_size = ROUNDUP(nr_items * entry_size, BIT(BASER_PAGE_BITS(pagesz)));
+ table_size = ROUNDUP(nr_items * entry_size,
+ BIT(BASER_PAGE_BITS(pagesz), UL));
/* The BASE registers support at most 256 pages. */
table_size = min(table_size, 256U << BASER_PAGE_BITS(pagesz));
- buffer = _xzalloc(table_size, BIT(BASER_PAGE_BITS(pagesz)));
+ buffer = _xzalloc(table_size, BIT(BASER_PAGE_BITS(pagesz), UL));
if ( !buffer )
return -ENOMEM;
@@ -483,7 +484,7 @@ static int gicv3_its_init_single_its(struct host_its *hw_its)
case GITS_BASER_TYPE_NONE:
continue;
case GITS_BASER_TYPE_DEVICE:
- ret = its_map_baser(basereg, reg, BIT(hw_its->devid_bits));
+ ret = its_map_baser(basereg, reg, BIT(hw_its->devid_bits, UL));
if ( ret )
return ret;
break;
@@ -635,7 +636,7 @@ int gicv3_its_map_guest_device(struct domain *d,
return ret;
/* Sanitise the provided hardware values against the host ITS. */
- if ( host_devid >= BIT(hw_its->devid_bits) )
+ if ( host_devid >= BIT(hw_its->devid_bits, UL) )
return -EINVAL;
/*
@@ -645,10 +646,10 @@ int gicv3_its_map_guest_device(struct domain *d,
* TODO: Investigate if the number of events can be limited to smaller
* values if the guest does not require that many.
*/
- nr_events = BIT(fls(nr_events - 1));
+ nr_events = BIT(fls(nr_events - 1), UL);
if ( nr_events < LPI_BLOCK )
nr_events = LPI_BLOCK;
- if ( nr_events >= BIT(hw_its->evid_bits) )
+ if ( nr_events >= BIT(hw_its->evid_bits, UL) )
return -EINVAL;
/* check for already existing mappings */
@@ -392,14 +392,14 @@ int gicv3_lpi_init_host_lpis(unsigned int host_lpi_bits)
printk(XENLOG_WARNING "WARNING: max_lpi_bits must be between 14 and 32, adjusting.\n");
max_lpi_bits = max(max_lpi_bits, 14U);
- lpi_data.max_host_lpi_ids = BIT(min(host_lpi_bits, max_lpi_bits));
+ lpi_data.max_host_lpi_ids = BIT(min(host_lpi_bits, max_lpi_bits), UL);
/*
* Warn if the number of LPIs are quite high, as the user might not want
* to waste megabytes of memory for a mostly empty table.
* It's very unlikely that we need more than 24 bits worth of LPIs.
*/
- if ( lpi_data.max_host_lpi_ids > BIT(24) )
+ if ( lpi_data.max_host_lpi_ids > BIT(24, UL) )
warning_add("Using high number of LPIs, limit memory usage with max_lpi_bits\n");
spin_lock_init(&lpi_data.host_lpis_lock);
@@ -327,8 +327,8 @@ static unsigned int get_top_bit(struct domain *d, vaddr_t gva, register_t tcr)
topbit = 31;
else
{
- if ( ((gva & BIT_ULL(55)) && (tcr & TCR_EL1_TBI1)) ||
- (!(gva & BIT_ULL(55)) && (tcr & TCR_EL1_TBI0)) )
+ if ( ((gva & BIT(55, ULL)) && (tcr & TCR_EL1_TBI1)) ||
+ (!(gva & BIT(55, ULL)) && (tcr & TCR_EL1_TBI0)) )
topbit = 55;
else
topbit = 63;
@@ -419,7 +419,7 @@ static bool guest_walk_ld(const struct vcpu *v,
{
/* Select the TTBR(0|1)_EL1 that will be used for address translation. */
- if ( (gva & BIT_ULL(topbit)) == 0 )
+ if ( (gva & BIT(topbit, ULL)) == 0 )
{
input_size = 64 - t0_sz;
@@ -554,7 +554,7 @@ static bool guest_walk_ld(const struct vcpu *v,
* inherited by page table attributes (ARM DDI 0487B.a J1-5928).
*/
xn_table |= pte.pt.xnt; /* Execute-Never */
- ro_table |= pte.pt.apt & BIT(1); /* Read-Only */
+ ro_table |= pte.pt.apt & BIT(1, UL);/* Read-Only */
/* Compute the base address of the next level translation table. */
mask = GENMASK_ULL(47, grainsizes[gran]);
@@ -97,7 +97,7 @@ typedef uint16_t coll_table_entry_t;
*/
typedef uint64_t dev_table_entry_t;
#define DEV_TABLE_ITT_ADDR(x) ((x) & GENMASK(51, 8))
-#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1))
+#define DEV_TABLE_ITT_SIZE(x) (BIT(((x) & GENMASK(4, 0)) + 1, UL))
#define DEV_TABLE_ENTRY(addr, bits) \
(((addr) & GENMASK(51, 8)) | (((bits) - 1) & GENMASK(4, 0)))
@@ -111,7 +111,7 @@ typedef uint64_t dev_table_entry_t;
*/
static paddr_t get_baser_phys_addr(uint64_t reg)
{
- if ( reg & BIT(9) )
+ if ( reg & BIT(9, UL) )
return (reg & GENMASK(47, 16)) |
((reg & GENMASK(15, 12)) << 36);
else
@@ -125,7 +125,7 @@ static int its_set_collection(struct virt_its *its, uint16_t collid,
paddr_t addr = get_baser_phys_addr(its->baser_coll);
/* The collection table entry must be able to store a VCPU ID. */
- BUILD_BUG_ON(BIT(sizeof(coll_table_entry_t) * 8) < MAX_VIRT_CPUS);
+ BUILD_BUG_ON(BIT(sizeof(coll_table_entry_t) * 8, UL) < MAX_VIRT_CPUS);
ASSERT(spin_is_locked(&its->its_lock));
@@ -690,7 +690,7 @@ static int its_handle_mapd(struct virt_its *its, uint64_t *cmdptr)
*/
ret = gicv3_its_map_guest_device(its->d, its->doorbell_address, devid,
its->doorbell_address, devid,
- BIT(size), valid);
+ BIT(size, UL), valid);
if ( ret && valid )
return ret;
}
@@ -1356,8 +1356,8 @@ static int vgic_v3_its_mmio_write(struct vcpu *v, mmio_info_t *info,
if ( reg & GITS_VALID_BIT )
{
its->max_devices = its_baser_nr_entries(reg);
- if ( its->max_devices > BIT(its->devid_bits) )
- its->max_devices = BIT(its->devid_bits);
+ if ( its->max_devices > BIT(its->devid_bits, UL) )
+ its->max_devices = BIT(its->devid_bits, UL);
}
else
its->max_devices = 0;
@@ -442,7 +442,7 @@ static uint64_t sanitize_pendbaser(uint64_t reg)
static void vgic_vcpu_enable_lpis(struct vcpu *v)
{
uint64_t reg = v->domain->arch.vgic.rdist_propbase;
- unsigned int nr_lpis = BIT((reg & 0x1f) + 1);
+ unsigned int nr_lpis = BIT((reg & 0x1f) + 1, UL);
/* rdists_enabled is protected by the domain lock. */
ASSERT(spin_is_locked(&v->domain->arch.vgic.lock));
@@ -65,7 +65,7 @@ struct vgic_irq_rank *vgic_rank_irq(struct vcpu *v, unsigned int irq)
void vgic_init_pending_irq(struct pending_irq *p, unsigned int virq)
{
/* The lpi_vcpu_id field must be big enough to hold a VCPU ID. */
- BUILD_BUG_ON(BIT(sizeof(p->lpi_vcpu_id) * 8) < MAX_VIRT_CPUS);
+ BUILD_BUG_ON(BIT(sizeof(p->lpi_vcpu_id) * 8, UL) < MAX_VIRT_CPUS);
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->inflight);
@@ -31,16 +31,16 @@
#define AML_UART_MISC_REG 0x10
/* UART_CONTROL bits */
-#define AML_UART_TX_RST BIT(22)
-#define AML_UART_RX_RST BIT(23)
-#define AML_UART_CLEAR_ERR BIT(24)
-#define AML_UART_RX_INT_EN BIT(27)
-#define AML_UART_TX_INT_EN BIT(28)
+#define AML_UART_TX_RST BIT(22, UL)
+#define AML_UART_RX_RST BIT(23, UL)
+#define AML_UART_CLEAR_ERR BIT(24, UL)
+#define AML_UART_RX_INT_EN BIT(27, UL)
+#define AML_UART_TX_INT_EN BIT(28, UL)
/* UART_STATUS bits */
-#define AML_UART_RX_FIFO_EMPTY BIT(20)
-#define AML_UART_TX_FIFO_FULL BIT(21)
-#define AML_UART_TX_FIFO_EMPTY BIT(22)
+#define AML_UART_RX_FIFO_EMPTY BIT(20, UL)
+#define AML_UART_TX_FIFO_FULL BIT(21, UL)
+#define AML_UART_TX_FIFO_EMPTY BIT(22, UL)
#define AML_UART_TX_CNT_MASK GENMASK(14, 8)
/* AML_UART_MISC bits */
@@ -29,27 +29,27 @@
#define UART_TX_REG 0x04
#define UART_CTRL_REG 0x08
-#define CTRL_TXFIFO_RST BIT(15)
-#define CTRL_RXFIFO_RST BIT(14)
-#define CTRL_TX_RDY_INT BIT(5)
-#define CTRL_RX_RDY_INT BIT(4)
-#define CTRL_BRK_DET_INT BIT(3)
-#define CTRL_FRM_ERR_INT BIT(2)
-#define CTRL_PAR_ERR_INT BIT(1)
-#define CTRL_OVR_ERR_INT BIT(0)
+#define CTRL_TXFIFO_RST BIT(15, UL)
+#define CTRL_RXFIFO_RST BIT(14, UL)
+#define CTRL_TX_RDY_INT BIT(5, UL)
+#define CTRL_RX_RDY_INT BIT(4, UL)
+#define CTRL_BRK_DET_INT BIT(3, UL)
+#define CTRL_FRM_ERR_INT BIT(2, UL)
+#define CTRL_PAR_ERR_INT BIT(1, UL)
+#define CTRL_OVR_ERR_INT BIT(0, UL)
#define CTRL_ERR_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
#define UART_STATUS_REG 0x0c
-#define STATUS_TXFIFO_EMP BIT(13)
-#define STATUS_TXFIFO_FUL BIT(11)
-#define STATUS_TXFIFO_HFL BIT(10)
-#define STATUS_TX_RDY BIT(5)
-#define STATUS_RX_RDY BIT(4)
-#define STATUS_BRK_DET BIT(3)
-#define STATUS_FRM_ERR BIT(2)
-#define STATUS_PAR_ERR BIT(1)
-#define STATUS_OVR_ERR BIT(0)
+#define STATUS_TXFIFO_EMP BIT(13, UL)
+#define STATUS_TXFIFO_FUL BIT(11, UL)
+#define STATUS_TXFIFO_HFL BIT(10, UL)
+#define STATUS_TX_RDY BIT(5, UL)
+#define STATUS_RX_RDY BIT(4, UL)
+#define STATUS_BRK_DET BIT(3, UL)
+#define STATUS_FRM_ERR BIT(2, UL)
+#define STATUS_PAR_ERR BIT(1, UL)
+#define STATUS_OVR_ERR BIT(0, UL)
#define STATUS_BRK_ERR (STATUS_BRK_DET | STATUS_FRM_ERR | \
STATUS_PAR_ERR | STATUS_OVR_ERR)
@@ -21,10 +21,8 @@
#define __clear_bit(n,p) clear_bit(n,p)
#define BITS_PER_WORD 32
-#define BIT(nr) (1UL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_WORD))
#define BIT_WORD(nr) ((nr) / BITS_PER_WORD)
-#define BIT_ULL(nr) (1ULL << (nr))
#define BITS_PER_BYTE 8
#define ADDR (*(volatile int *) addr)
@@ -149,9 +149,9 @@
(7UL << GICR_PENDBASER_INNER_CACHEABILITY_SHIFT)
#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \
(7UL << GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT)
-#define GICR_PENDBASER_PTZ BIT(62)
+#define GICR_PENDBASER_PTZ BIT(62, UL)
#define GICR_PENDBASER_RES0_MASK \
- (BIT(63) | GENMASK(61, 59) | GENMASK(55, 52) | \
+ (BIT(63, UL) | GENMASK(61, 59) | GENMASK(55, 52) | \
GENMASK(15, 12) | GENMASK(6, 0))
#define DEFAULT_PMR_VALUE 0xff
@@ -38,12 +38,12 @@
#define GITS_PIDR2 GICR_PIDR2
/* Register bits */
-#define GITS_VALID_BIT BIT(63)
+#define GITS_VALID_BIT BIT(63, UL)
-#define GITS_CTLR_QUIESCENT BIT(31)
-#define GITS_CTLR_ENABLE BIT(0)
+#define GITS_CTLR_QUIESCENT BIT(31, UL)
+#define GITS_CTLR_ENABLE BIT(0, UL)
-#define GITS_TYPER_PTA BIT(19)
+#define GITS_TYPER_PTA BIT(19, UL)
#define GITS_TYPER_DEVIDS_SHIFT 13
#define GITS_TYPER_DEVIDS_MASK (0x1fUL << GITS_TYPER_DEVIDS_SHIFT)
#define GITS_TYPER_DEVICE_ID_BITS(r) (((r & GITS_TYPER_DEVIDS_MASK) >> \
@@ -60,7 +60,7 @@
GITS_TYPER_ITT_SIZE_SHIFT) + 1)
#define GITS_TYPER_PHYSICAL (1U << 0)
-#define GITS_BASER_INDIRECT BIT(62)
+#define GITS_BASER_INDIRECT BIT(62, UL)
#define GITS_BASER_INNER_CACHEABILITY_SHIFT 59
#define GITS_BASER_TYPE_SHIFT 56
#define GITS_BASER_TYPE_MASK (7ULL << GITS_BASER_TYPE_SHIFT)
@@ -21,4 +21,6 @@
#define _AT(T,X) ((T)(X))
#endif
+#define BIT(pos, sfx) (_AC(1, sfx) << (pos))
+
#endif /* __XEN_CONST_H__ */
Arm currently provides two macros, BIT() and BIT_ULL(), that are only
usable in C and return an unsigned long and an unsigned long long
respectively. Extending the macros to also work in assembly would be a
nice benefit, as it could replace the common but harder-to-read pattern
used to define fields: (_AC(1, sfx) << X).

Rather than extending both macros, it was decided to drop BIT_ULL() and
extend BIT() to take a suffix (e.g. U, UL, ULL) as a parameter. This
allows different suffixes to be used without having to define new
macros.

The extended macro is moved to include/xen/const.h so it can be used by
anyone in Xen and also avoids having to include bitops.h in assembly
code.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---

Changes in v2:
    - Replace "xen/const: Introduce _BITUL and _BITULL"
---
 xen/arch/arm/arm32/insn.c         |  2 +-
 xen/arch/arm/arm64/insn.c         | 18 +++++++++---------
 xen/arch/arm/gic-v3-its.c         | 13 +++++++------
 xen/arch/arm/gic-v3-lpi.c         |  4 ++--
 xen/arch/arm/guest_walk.c         |  8 ++++----
 xen/arch/arm/vgic-v3-its.c        | 12 ++++++------
 xen/arch/arm/vgic-v3.c            |  2 +-
 xen/arch/arm/vgic.c               |  2 +-
 xen/drivers/char/meson-uart.c     | 16 ++++++++--------
 xen/drivers/char/mvebu-uart.c     | 34 +++++++++++++++++-----------------
 xen/include/asm-arm/bitops.h      |  2 --
 xen/include/asm-arm/gic_v3_defs.h |  4 ++--
 xen/include/asm-arm/gic_v3_its.h  | 10 +++++-----
 xen/include/xen/const.h           |  2 ++
 14 files changed, 65 insertions(+), 64 deletions(-)
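For reviewers, a minimal, self-contained C sketch (not part of the patch) of
how the extended BIT() macro is intended to be used. The local _AC() stand-in
is only there so the snippet compiles outside the Xen tree; in
<xen/const.h> _AC() pastes the suffix in C and, as far as I understand, drops
it in assembly, which is what lets BIT(pos, sfx) also appear in .S files.

```c
/*
 * Illustrative only: behaviour of the extended BIT(pos, sfx) macro.
 * _AC() below is a stand-in for the definition in <xen/const.h>
 * (token pasting when compiling C).
 */
#include <stdint.h>
#include <stdio.h>

#define _AC(X, Y)     (X##Y)
#define BIT(pos, sfx) (_AC(1, sfx) << (pos))

int main(void)
{
    /* BIT(23, UL) replaces the old BIT(23): an unsigned long constant. */
    unsigned long imm_sign = BIT(23, UL);

    /* BIT(55, ULL) replaces the old BIT_ULL(55): an unsigned long long. */
    uint64_t tbi_select = BIT(55, ULL);

    printf("%#lx %#llx\n", imm_sign, (unsigned long long)tbi_select);
    return 0;
}
```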