[09/19] ASoC: amd: ps: add support for Soundwire DMA interrupts
Commit Message
Initialize a workqueue for handling DMA interrupts.
Whenever audio data equal to the SoundWire FIFO watermark level is
produced/consumed, an interrupt is generated.
Acknowledge the interrupt and schedule the workqueue.
Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
---
sound/soc/amd/ps/acp63.h | 2 +
sound/soc/amd/ps/pci-ps.c | 81 ++++++++++++++++++++++++++++++++++++++-
2 files changed, 82 insertions(+), 1 deletion(-)
Comments
> @@ -167,9 +167,11 @@ struct acp63_dev_data {
> struct platform_device *pdev[ACP63_DEVS];
> struct mutex acp_lock; /* protect shared registers */
> struct fwnode_handle *sdw_fw_node;
> + struct work_struct acp_sdw_dma_work;
> u16 pdev_mask;
> u16 pdev_count;
> u16 pdm_dev_index;
> + u16 dma_intr_stat[ACP63_SDW_MAX_STREAMS];
streams and DMAs are different things in SoundWire. You can have a 1:N
mapping.
> u8 sdw_master_count;
> u16 sdw0_dev_index;
> u16 sdw1_dev_index;
> diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
> index 0fbe5e27f3fb..5b82ee8e3ad8 100644
> --- a/sound/soc/amd/ps/pci-ps.c
> +++ b/sound/soc/amd/ps/pci-ps.c
> @@ -113,14 +113,37 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
> return 0;
> }
>
> +static void acp63_sdw_dma_workthread(struct work_struct *work)
> +{
> + struct acp63_dev_data *adata = container_of(work, struct acp63_dev_data,
> + acp_sdw_dma_work);
> + struct sdw_dma_dev_data *sdw_dma_data;
> + u32 stream_index;
> + u16 pdev_index;
> +
> + pdev_index = adata->sdw_dma_dev_index;
> + sdw_dma_data = dev_get_drvdata(&adata->pdev[pdev_index]->dev);
> +
> + for (stream_index = 0; stream_index < ACP63_SDW_MAX_STREAMS; stream_index++) {
> + if (adata->dma_intr_stat[stream_index]) {
> + if (sdw_dma_data->sdw_stream[stream_index])
> + snd_pcm_period_elapsed(sdw_dma_data->sdw_stream[stream_index]);
is there a reason why you do this in a work thread?
IIRC we did this in SOF because of an issue where during an xrun a stop
IPC would be sent while we were dealing with an IPC.
I don't quite see why it's needed for a DMA?
What am I missing?
> + adata->dma_intr_stat[stream_index] = 0;
> + }
> + }
> +}
On 11/01/23 21:08, Pierre-Louis Bossart wrote:
>
>
>> @@ -167,9 +167,11 @@ struct acp63_dev_data {
>> struct platform_device *pdev[ACP63_DEVS];
>> struct mutex acp_lock; /* protect shared registers */
>> struct fwnode_handle *sdw_fw_node;
>> + struct work_struct acp_sdw_dma_work;
>> u16 pdev_mask;
>> u16 pdev_count;
>> u16 pdm_dev_index;
>> + u16 dma_intr_stat[ACP63_SDW_MAX_STREAMS];
> streams and DMAs are different things in SoundWire. You can have a 1:N
> mapping.
>
>> u8 sdw_master_count;
>> u16 sdw0_dev_index;
>> u16 sdw1_dev_index;
>> diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
>> index 0fbe5e27f3fb..5b82ee8e3ad8 100644
>> --- a/sound/soc/amd/ps/pci-ps.c
>> +++ b/sound/soc/amd/ps/pci-ps.c
>> @@ -113,14 +113,37 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
>> return 0;
>> }
>>
>> +static void acp63_sdw_dma_workthread(struct work_struct *work)
>> +{
>> + struct acp63_dev_data *adata = container_of(work, struct acp63_dev_data,
>> + acp_sdw_dma_work);
>> + struct sdw_dma_dev_data *sdw_dma_data;
>> + u32 stream_index;
>> + u16 pdev_index;
>> +
>> + pdev_index = adata->sdw_dma_dev_index;
>> + sdw_dma_data = dev_get_drvdata(&adata->pdev[pdev_index]->dev);
>> +
>> + for (stream_index = 0; stream_index < ACP63_SDW_MAX_STREAMS; stream_index++) {
>> + if (adata->dma_intr_stat[stream_index]) {
>> + if (sdw_dma_data->sdw_stream[stream_index])
>> + snd_pcm_period_elapsed(sdw_dma_data->sdw_stream[stream_index]);
> is there a reason why you do this in a work thread?
>
> IIRC we did this in SOF because of an issue where during an xrun a stop
> IPC would be sent while we were dealing with an IPC.
>
> I don't quite see why it's needed for a DMA?
>
> What am I missing?
Initially, we handled this in atomic context. We saw issues
during stream closure: handling period_elapsed in interrupt
context caused a sleep-in-atomic-context problem.
To avoid that, we declared the dai_link as non-atomic and
moved the period_elapsed code to a work queue.
>> + adata->dma_intr_stat[stream_index] = 0;
>> + }
>> + }
>> +}
@@ -167,9 +167,11 @@ struct acp63_dev_data {
struct platform_device *pdev[ACP63_DEVS];
struct mutex acp_lock; /* protect shared registers */
struct fwnode_handle *sdw_fw_node;
+ struct work_struct acp_sdw_dma_work;
u16 pdev_mask;
u16 pdev_count;
u16 pdm_dev_index;
+ u16 dma_intr_stat[ACP63_SDW_MAX_STREAMS];
u8 sdw_master_count;
u16 sdw0_dev_index;
u16 sdw1_dev_index;
@@ -113,14 +113,37 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
return 0;
}
+static void acp63_sdw_dma_workthread(struct work_struct *work)
+{
+ struct acp63_dev_data *adata = container_of(work, struct acp63_dev_data,
+ acp_sdw_dma_work);
+ struct sdw_dma_dev_data *sdw_dma_data;
+ u32 stream_index;
+ u16 pdev_index;
+
+ pdev_index = adata->sdw_dma_dev_index;
+ sdw_dma_data = dev_get_drvdata(&adata->pdev[pdev_index]->dev);
+
+ for (stream_index = 0; stream_index < ACP63_SDW_MAX_STREAMS; stream_index++) {
+ if (adata->dma_intr_stat[stream_index]) {
+ if (sdw_dma_data->sdw_stream[stream_index])
+ snd_pcm_period_elapsed(sdw_dma_data->sdw_stream[stream_index]);
+ adata->dma_intr_stat[stream_index] = 0;
+ }
+ }
+}
+
static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
{
struct acp63_dev_data *adata;
struct pdm_dev_data *ps_pdm_data;
struct amd_sdwc_ctrl *ctrl;
u32 ext_intr_stat, ext_intr_stat1;
+ u32 stream_id = 0;
u16 irq_flag = 0;
+ u16 sdw_dma_irq_flag = 0;
u16 pdev_index;
+ u16 index;
adata = dev_id;
if (!adata)
@@ -159,7 +182,58 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id)
snd_pcm_period_elapsed(ps_pdm_data->capture_stream);
irq_flag = 1;
}
- if (irq_flag)
+ if (ext_intr_stat & ACP_SDW_DMA_IRQ_MASK) {
+ for (index = ACP_HS_RX_THRESHOLD; index <= ACP_AUDIO_TX_THRESHOLD; index++) {
+ if (ext_intr_stat & BIT(index)) {
+ acp63_writel(BIT(index),
+ adata->acp63_base + ACP_EXTERNAL_INTR_STAT);
+ switch (index) {
+ case ACP_AUDIO_TX_THRESHOLD:
+ stream_id = ACP_SDW_AUDIO_TX;
+ break;
+ case ACP_BT_TX_THRESHOLD:
+ stream_id = ACP_SDW_BT_TX;
+ break;
+ case ACP_HS_TX_THRESHOLD:
+ stream_id = ACP_SDW_HS_TX;
+ break;
+ case ACP_AUDIO_RX_THRESHOLD:
+ stream_id = ACP_SDW_AUDIO_RX;
+ break;
+ case ACP_BT_RX_THRESHOLD:
+ stream_id = ACP_SDW_BT_RX;
+ break;
+ case ACP_HS_RX_THRESHOLD:
+ stream_id = ACP_SDW_HS_RX;
+ break;
+ }
+
+ adata->dma_intr_stat[stream_id] = 1;
+ sdw_dma_irq_flag = 1;
+ }
+ }
+ }
+
+ /* SDW1 BT RX */
+ if (ext_intr_stat1 & BIT(ACP_P1_BT_RX_THRESHOLD)) {
+ acp63_writel(BIT(ACP_P1_BT_RX_THRESHOLD),
+ adata->acp63_base + ACP_EXTERNAL_INTR_STAT1);
+ adata->dma_intr_stat[ACP_SDW1_BT_RX] = 1;
+ sdw_dma_irq_flag = 1;
+ }
+
+ /* SDW1 BT TX*/
+ if (ext_intr_stat1 & BIT(ACP_P1_BT_TX_THRESHOLD)) {
+ acp63_writel(BIT(ACP_P1_BT_TX_THRESHOLD),
+ adata->acp63_base + ACP_EXTERNAL_INTR_STAT1);
+ adata->dma_intr_stat[ACP_SDW1_BT_TX] = 1;
+ sdw_dma_irq_flag = 1;
+ }
+
+ if (sdw_dma_irq_flag)
+ schedule_work(&adata->acp_sdw_dma_work);
+
+ if (irq_flag || sdw_dma_irq_flag)
return IRQ_HANDLED;
else
return IRQ_NONE;
@@ -240,6 +314,7 @@ static int get_acp63_device_config(u32 config, struct pci_dev *pci, struct acp63
if (sdw_dev) {
is_sdw_dev = true;
acp_data->sdw_fw_node = acpi_fwnode_handle(sdw_dev);
+ INIT_WORK(&acp_data->acp_sdw_dma_work, acp63_sdw_dma_workthread);
ret = sdw_amd_scan_controller(dev);
if (ret)
return ret;
@@ -612,6 +687,10 @@ static void snd_acp63_remove(struct pci_dev *pci)
int ret, index;
adata = pci_get_drvdata(pci);
+
+ if (adata->pdev_mask & ACP63_SDW_DEV_MASK)
+ cancel_work_sync(&adata->acp_sdw_dma_work);
+
for (index = 0; index < adata->pdev_count; index++)
platform_device_unregister(adata->pdev[index]);
ret = acp63_deinit(adata->acp63_base, &pci->dev);