On 21/02/23 21:35, Pierre-Louis Bossart wrote:
>> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
>> +{
>> + u64 response;
>> +
>> + mutex_lock(&amd_manager->bus.msg_lock);
>> + response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
>> + mutex_unlock(&amd_manager->bus.msg_lock);
>> + amd_sdw_process_ping_status(response, amd_manager);
> do you have a case where a new command could be sent after the
> mutex_unlock(), which could change the response fields?
No. There won't be a new command that could change the response fields.
The lock is taken only to prevent peripheral register reads/writes while
the ping command is in flight.
This function sends the ping command, then reads and processes the
peripheral status when PREQ is asserted; it is also invoked after the
peripheral enumeration sequence completes, to collect the updated
peripheral status.
> In other words, should the last amd_sdw_process_ping_status() function
> be protected as well?
IMO, it's not needed.
>> +}
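To make the reasoning explicit: the response is latched into a local u64
before the unlock, and amd_sdw_process_ping_status() only decodes that
local copy, so a command issued after mutex_unlock() cannot alter what
gets processed here. If serializing the processing were ever needed, the
change would simply be to move the call inside the critical section, e.g.
(hypothetical, not what this patch does):

	mutex_lock(&amd_manager->bus.msg_lock);
	response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
	/* hypothetical: decode the response while still holding msg_lock */
	amd_sdw_process_ping_status(response, amd_manager);
	mutex_unlock(&amd_manager->bus.msg_lock);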
@@ -417,6 +417,51 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
return SDW_CMD_OK;
}
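+/* map the raw 2-bit status field onto enum sdw_slave_status */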
+static void amd_sdw_fill_slave_status(struct amd_sdw_manager *amd_manager, u16 index, u32 status)
+{
+ switch (status) {
+ case SDW_SLAVE_ATTACHED:
+ amd_manager->status[index] = SDW_SLAVE_ATTACHED;
+ break;
+ case SDW_SLAVE_UNATTACHED:
+ amd_manager->status[index] = SDW_SLAVE_UNATTACHED;
+ break;
+ case SDW_SLAVE_ALERT:
+ amd_manager->status[index] = SDW_SLAVE_ALERT;
+ break;
+ default:
+ amd_manager->status[index] = SDW_SLAVE_RESERVED;
+ break;
+ }
+}
+
+static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
+{
+ u64 slave_stat;
+ u32 val;
+ u16 dev_index;
+
+ /* slave status response */
+ slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
+ slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
+ dev_dbg(amd_manager->dev, "slave_stat:0x%llx\n", slave_stat);
+ for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
+ val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
+ dev_dbg(amd_manager->dev, "val:0x%x\n", val);
+ amd_sdw_fill_slave_status(amd_manager, dev_index, val);
+ }
+}
+
+static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
+{
+ u64 response;
+
+ mutex_lock(&amd_manager->bus.msg_lock);
+ response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
+ mutex_unlock(&amd_manager->bus.msg_lock);
+ amd_sdw_process_ping_status(response, amd_manager);
+}
+
static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
{
struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
@@ -817,6 +862,89 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
dais, num_dais);
}
+static void amd_sdw_update_slave_status_work(struct work_struct *work)
+{
+ struct amd_sdw_manager *amd_manager =
+ container_of(work, struct amd_sdw_manager, amd_sdw_work);
+ int retry_count = 0;
+
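+ /* Device 0 attached: mask state-change interrupts while enumeration is in progress */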
+ if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
+ acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
+ acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ }
+
+update_status:
+ sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
+ /*
+ * During the peripheral enumeration sequence, the SoundWire manager interrupts
+ * are masked. Once device number programming is done for all peripherals,
+ * the interrupts will be unmasked. Read the peripheral device status from the
+ * ping command and process the response. This sequence ensures that all
+ * peripheral devices are enumerated and initialized properly.
+ */
+ if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
+ if (retry_count++ < SDW_MAX_DEVICES) {
+ acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
+ ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
+ acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
+ amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
+ amd_sdw_read_and_process_ping_status(amd_manager);
+ goto update_status;
+ } else {
+ dev_err_ratelimited(amd_manager->dev,
+ "Device0 detected after %d iterations\n",
+ retry_count);
+ }
+ }
+}
+
+static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
+ struct amd_sdw_manager *amd_manager)
+{
+ u64 slave_stat;
+ u32 val;
+ int dev_index;
+
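+ /* a Device 0 attach starts a fresh enumeration cycle; clear stale statuses */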
+ if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
+ memset(amd_manager->status, 0, sizeof(amd_manager->status));
+ slave_stat = status_change_0to7;
+ slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
+ dev_dbg(amd_manager->dev, "status_change_0to7:0x%x status_change_8to11:0x%x\n",
+ status_change_0to7, status_change_8to11);
+ if (slave_stat) {
+ for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
+ if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
+ val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
+ AMD_SDW_MCP_SLAVE_STATUS_MASK;
+ amd_sdw_fill_slave_status(amd_manager, dev_index, val);
+ }
+ }
+ }
+}
+
+static void amd_sdw_irq_thread(struct work_struct *work)
+{
+ struct amd_sdw_manager *amd_manager =
+ container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
+ u32 status_change_8to11;
+ u32 status_change_0to7;
+
+ status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
+ status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
+ dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
+ amd_manager->instance, status_change_0to7, status_change_8to11);
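+ /* PREQ asserted: poll the current peripheral status with a ping command */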
+ if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
+ amd_sdw_read_and_process_ping_status(amd_manager);
+ } else {
+ /* check for updated status on the peripheral devices */
+ amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
+ }
+ if (status_change_8to11 || status_change_0to7)
+ schedule_work(&amd_manager->amd_sdw_work);
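+ /* acknowledge the handled state-change interrupt status */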
+ acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
+ acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
+}
+
static void amd_sdw_probe_work(struct work_struct *work)
{
struct amd_sdw_manager *amd_manager = container_of(work, struct amd_sdw_manager,
@@ -909,6 +1037,8 @@ static int amd_sdw_manager_probe(struct platform_device *pdev)
return ret;
}
dev_set_drvdata(dev, amd_manager);
+ INIT_WORK(&amd_manager->amd_sdw_irq_thread, amd_sdw_irq_thread);
+ INIT_WORK(&amd_manager->amd_sdw_work, amd_sdw_update_slave_status_work);
INIT_WORK(&amd_manager->probe_work, amd_sdw_probe_work);
/*
* Instead of having lengthy probe sequence, split probe in two and
@@ -186,6 +186,7 @@
#define AMD_SDW1_PAD_KEEPER_EN_MASK 0x10
#define AMD_SDW0_PAD_KEEPER_DISABLE_MASK 0x1E
#define AMD_SDW1_PAD_KEEPER_DISABLE_MASK 0xF
+#define AMD_SDW_PREQ_INTR_STAT BIT(19)
enum amd_sdw_cmd_type {
AMD_SDW_CMD_PING = 0,
@@ -45,6 +45,8 @@ struct sdw_amd_dai_runtime {
* @mmio: SoundWire registers mmio base
* @acp_mmio: acp registers mmio base
* @reg_mask: register mask structure per manager instance
+ * @amd_sdw_irq_thread: SoundWire manager irq handling work
+ * @amd_sdw_work: peripheral status update work
* @probe_work: SoundWire manager probe workqueue
* @acp_sdw_lock: mutex to protect ACP shared register access
* @num_din_ports: number of input ports
@@ -65,10 +67,14 @@ struct amd_sdw_manager {
void __iomem *acp_mmio;
struct sdw_manager_reg_mask *reg_mask;
+ struct work_struct amd_sdw_irq_thread;
+ struct work_struct amd_sdw_work;
struct work_struct probe_work;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+
int num_din_ports;
int num_dout_ports;