@@ -8,6 +8,7 @@
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
+#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -405,6 +406,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
@@ -17,6 +17,7 @@ intel_qat-objs := adf_cfg.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
+ adf_gen4_timer.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -188,6 +188,8 @@ struct adf_hw_device_data {
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+ int (*start_timer)(struct adf_accel_dev *accel_dev);
+ void (*stop_timer)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
@@ -295,6 +297,7 @@ struct adf_accel_dev {
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ struct adf_timer *timer;
union {
struct {
/* protects VF2PF interrupts access */
@@ -223,6 +223,18 @@ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
return 0;
}
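+/* Report the elapsed timer period count to the FW via an admin message */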
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+
+ req.cmd_id = ICP_QAT_FW_SYNC;
+ req.int_timer_ticks = cnt;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
@@ -95,6 +95,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
new file mode 100644
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_timer.h"
+
+#define ADF_GEN4_TIMER_VALUE_MS 200
+
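+/* Align the next expiry to a multiple of the timer period */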
+static unsigned long adf_get_next_timeout(void)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_GEN4_TIMER_VALUE_MS);
+
+ return rounddown(jiffies + timeout, timeout);
+}
+
+/*
+ * This periodic update is used to trigger heartbeat (HB), rate limiting (RL)
+ * and telemetry (TL) events in the firmware.
+ */
+static void timer_handler_bh(struct work_struct *work)
+{
+ struct adf_timer *timer_ctx = container_of(work, struct adf_timer, timer_bh);
+ struct adf_accel_dev *accel_dev = timer_ctx->accel_dev;
+
+ if (adf_send_admin_tim_sync(accel_dev, timer_ctx->cnt))
+ dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+static void timer_handler(struct timer_list *tl)
+{
+ struct adf_timer *timer_ctx = from_timer(timer_ctx, tl, timer);
+ unsigned long timeout_val = adf_get_next_timeout();
+
+	/* Defer the admin request to process context via the misc workqueue */
+ adf_misc_wq_queue_work(&timer_ctx->timer_bh);
+
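+	/* Count elapsed periods; reported to the FW on each sync */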
+ timer_ctx->cnt++;
+ mod_timer(tl, timeout_val);
+}
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+{
+ unsigned long timeout_val = adf_get_next_timeout();
+ struct adf_timer *timer_ctx;
+
+ timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+ if (!timer_ctx)
+ return -ENOMEM;
+
+ timer_ctx->accel_dev = accel_dev;
+ timer_ctx->cnt = 0;
+ accel_dev->timer = timer_ctx;
+
+ INIT_WORK(&timer_ctx->timer_bh, timer_handler_bh);
+ timer_setup(&timer_ctx->timer, timer_handler, 0);
+ mod_timer(&timer_ctx->timer, timeout_val);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx = accel_dev->timer;
+
+ if (!timer_ctx)
+ return;
+
+	del_timer_sync(&timer_ctx->timer);
+	/* Make sure a queued bottom half does not run past the free below */
+	cancel_work_sync(&timer_ctx->timer_bh);
+
+ kfree(timer_ctx);
+ accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
new file mode 100644
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_GEN4_TIMER_H_
+#define ADF_GEN4_TIMER_H_
+
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
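+/* Context of the periodic FW sync timer, one instance per accel device */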
+struct adf_timer {
+ struct adf_accel_dev *accel_dev;
+ struct timer_list timer;
+ struct work_struct timer_bh;
+	u32 cnt;		/* number of elapsed timer periods */
+};
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_TIMER_H_ */
@@ -163,6 +163,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
+ int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -187,6 +188,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->start_timer) {
+ ret = hw_data->start_timer(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
+ return ret;
+ }
+ }
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -235,6 +244,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
*/
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
bool wait = false;
@@ -270,6 +280,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
}
}
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
if (wait)
msleep(100);
@@ -37,6 +37,9 @@ struct icp_qat_fw_init_admin_req {
__u16 ibuf_size_in_kb;
__u16 resrvd3;
};
+ struct {
+ __u32 int_timer_ticks;
+ };
__u32 idle_filter;
};
@@ -97,6 +100,8 @@ struct icp_qat_fw_init_admin_resp {
};
} __packed;
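+/* The timer sync request reuses the heartbeat sync admin command id */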
+#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
+
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0