@@ -805,6 +805,91 @@ int amd_espi_periph_io_read(struct amd_espi *amd_espi, struct periph_io_rw *msg_
return CB_SUCCESS;
}
+/*
+ * Check that the 32-bit access at mem_data->addr lies entirely within an MMIO
+ * target range that is enabled in the I/O-MMIO decode configuration.
+ */
+static int is_mmio_address_valid(struct amd_espi *amd_espi, struct periph_mem_rw *mem_data)
+{
+ struct io_mmio_decode_config io_conf;
+ u32 mmio_end_addr = mem_data->addr + 3;
+
+ amd_espi_get_io_mmio_decode_info(amd_espi, &io_conf);
+ if (mem_data->addr >= io_conf.mmio_target_range0 &&
+ (mmio_end_addr <= ((u32)io_conf.mmio_target_range0 +
+ io_conf.mmio_range4.mmio_range0_size))) {
+ if (!(io_conf.io_mmio_dc_enable & MMIO_DECODE_RANGE0)) {
+ dev_err(amd_espi->dev, "MMIO range0 not enabled for address: 0x%x\n",
+ mem_data->addr);
+ return -EINVAL;
+ }
+ } else if ((mem_data->addr >= io_conf.mmio_target_range1) &&
+ (mmio_end_addr <= ((u32)io_conf.mmio_target_range1 +
+ io_conf.mmio_range4.mmio_range1_size))) {
+ if (!(io_conf.io_mmio_dc_enable & MMIO_DECODE_RANGE1)) {
+ dev_err(amd_espi->dev, "MMIO range1 not enabled for address: 0x%x\n",
+ mem_data->addr);
+ return -EINVAL;
+ }
+ } else if ((mem_data->addr >= io_conf.mmio_target_range2) &&
+ (mmio_end_addr <= ((u32)io_conf.mmio_target_range2 +
+ io_conf.mmio_range5.mmio_range2_size))) {
+ if (!(io_conf.io_mmio_dc_enable & MMIO_DECODE_RANGE2)) {
+ dev_err(amd_espi->dev, "MMIO range2 not enabled for address: 0x%x\n",
+ mem_data->addr);
+ return -EINVAL;
+ }
+ } else if ((mem_data->addr >= io_conf.mmio_target_range3) &&
+ (mmio_end_addr <= ((u32)io_conf.mmio_target_range3 +
+ io_conf.mmio_range5.mmio_range3_size))) {
+ if (!(io_conf.io_mmio_dc_enable & MMIO_DECODE_RANGE3)) {
+ dev_err(amd_espi->dev, "MMIO range3 not enabled for address: 0x%x\n",
+ mem_data->addr);
+ return -EINVAL;
+ }
+	} else {
+		dev_err(amd_espi->dev, "MMIO address 0x%x is outside all decoded ranges\n",
+			mem_data->addr);
+		return -EINVAL;
+	}
+
+ return CB_SUCCESS;
+}
+
+int amd_espi_periph_mem_write(struct amd_espi *amd_espi, struct periph_mem_rw *mem_data)
+{
+ void __iomem *mmio_addr;
+
+ if (is_mmio_address_valid(amd_espi, mem_data) != CB_SUCCESS)
+ return -EINVAL;
+
+ mmio_addr = ioremap(mem_data->addr, 4);
+ if (!mmio_addr)
+ return -ENOMEM;
+
+ writel(mem_data->data, mmio_addr);
+ iounmap(mmio_addr);
+
+ return CB_SUCCESS;
+}
+
+int amd_espi_periph_mem_read(struct amd_espi *amd_espi, struct periph_mem_rw *mem_data)
+{
+ void __iomem *mmio_addr;
+
+ if (is_mmio_address_valid(amd_espi, mem_data) != CB_SUCCESS)
+ return -EINVAL;
+
+ mmio_addr = ioremap(mem_data->addr, 4);
+ if (!mmio_addr)
+ return -ENOMEM;
+
+ mem_data->data = readl(mmio_addr);
+ iounmap(mmio_addr);
+
+ return CB_SUCCESS;
+}
+
static int amd_espi_get_master_cap(struct amd_espi *amd_espi, struct espi_master *master)
{
u32 master_cap_reg, info;
@@ -372,6 +372,53 @@ static int amd_espi_ioctl_io_read(struct amd_espi *amd_espi, unsigned long arg)
return ret;
}
+static int amd_espi_ioctl_mem_write(struct amd_espi *amd_espi, unsigned long arg)
+{
+ struct periph_mem_rw *mem_data;
+ int ret;
+
+ mem_data = kzalloc(sizeof(*mem_data), GFP_KERNEL);
+ if (!mem_data)
+ return -ENOMEM;
+
+	if (copy_from_user(mem_data, (void __user *)arg, sizeof(*mem_data))) {
+ ret = -EFAULT;
+ goto mem_write_free;
+ }
+
+ ret = amd_espi_periph_mem_write(amd_espi, mem_data);
+
+mem_write_free:
+ kfree(mem_data);
+ return ret;
+}
+
+static int amd_espi_ioctl_mem_read(struct amd_espi *amd_espi, unsigned long arg)
+{
+ struct periph_mem_rw *mem_data;
+ int ret;
+
+ mem_data = kzalloc(sizeof(*mem_data), GFP_KERNEL);
+ if (!mem_data)
+ return -ENOMEM;
+
+	if (copy_from_user(mem_data, (void __user *)arg, sizeof(*mem_data))) {
+ ret = -EFAULT;
+ goto mem_read_free;
+ }
+
+ ret = amd_espi_periph_mem_read(amd_espi, mem_data);
+ if (ret != CB_SUCCESS)
+ goto mem_read_free;
+
+	if (copy_to_user((void __user *)arg, mem_data, sizeof(*mem_data)))
+ ret = -EFAULT;
+
+mem_read_free:
+ kfree(mem_data);
+ return ret;
+}
+
static long amd_espi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct amd_espi *amd_espi = filp->private_data;
@@ -422,6 +469,12 @@ static long amd_espi_ioctl(struct file *filp, unsigned int cmd, unsigned long ar
case ESPI_IO_READ:
ret = amd_espi_ioctl_io_read(amd_espi, arg);
break;
+ case ESPI_MEM_WRITE:
+ ret = amd_espi_ioctl_mem_write(amd_espi, arg);
+ break;
+ case ESPI_MEM_READ:
+ ret = amd_espi_ioctl_mem_read(amd_espi, arg);
+ break;
default:
dev_err(amd_espi->dev, "ESPI command not found, returning error\n");
ret = -EINVAL;
@@ -178,6 +178,8 @@
#define ESPI_DS_IODECODE_CONFIG _IOWR(ESPI_MAGIC_NUMBER, 0x9, u32)
#define ESPI_IO_WRITE _IOWR(ESPI_MAGIC_NUMBER, 0xa, struct periph_io_rw)
#define ESPI_IO_READ _IOWR(ESPI_MAGIC_NUMBER, 0xb, struct periph_io_rw)
+#define ESPI_MEM_WRITE _IOWR(ESPI_MAGIC_NUMBER, 0xc, struct periph_mem_rw)
+#define ESPI_MEM_READ _IOWR(ESPI_MAGIC_NUMBER, 0xd, struct periph_mem_rw)
/*
* enum amd_espi_versions - eSPI controller versions
@@ -382,6 +384,11 @@ struct periph_io_rw {
union io_data data;
} __packed;
+struct periph_mem_rw {
+ u32 addr;
+ u32 data;
+} __packed;
+
/* Function prototypes */
int amd_espi_device_create(struct amd_espi *amd_espi, struct device *dev);
void amd_espi_device_remove(struct amd_espi *amd_espi);
@@ -404,4 +411,6 @@ void amd_espi_set_io_mmio_decode_config(struct amd_espi *amd_espi,
void amd_espi_disable_io_decode_range(struct amd_espi *amd_espi, u32 io_range);
int amd_espi_periph_io_write(struct amd_espi *amd_espi, struct periph_io_rw *message_io);
int amd_espi_periph_io_read(struct amd_espi *amd_espi, struct periph_io_rw *message_io);
+int amd_espi_periph_mem_write(struct amd_espi *amd_espi, struct periph_mem_rw *mem_data);
+int amd_espi_periph_mem_read(struct amd_espi *amd_espi, struct periph_mem_rw *mem_data);
#endif
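
For reference, a minimal user-space sketch of how the new ESPI_MEM_READ and
ESPI_MEM_WRITE ioctls could be exercised. The device node path, the header
name, and the target address below are illustrative assumptions, not part of
this patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "amd_espi_ioctl.h"	/* assumed header exposing the ioctls and struct periph_mem_rw */

int main(void)
{
	/* Placeholder MMIO address; it must fall inside an enabled decode range. */
	struct periph_mem_rw m = { .addr = 0xfed80000 };
	int fd = open("/dev/amd_espi", O_RDWR);		/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Read 32 bits from the decoded peripheral MMIO address. */
	if (ioctl(fd, ESPI_MEM_READ, &m) == 0)
		printf("0x%x = 0x%x\n", m.addr, m.data);
	else
		perror("ESPI_MEM_READ");

	/* Write the value back through the same channel. */
	if (ioctl(fd, ESPI_MEM_WRITE, &m) != 0)
		perror("ESPI_MEM_WRITE");

	close(fd);
	return 0;
}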