On Tue, Nov 28, 2023, at 15:08, Herve Codina wrote:
> @@ -272,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct
> qmc_chan_info *info)
> if (ret)
> return ret;
>
> + spin_lock_irqsave(&chan->ts_lock, flags);
> +
> info->mode = chan->mode;
> info->rx_fs_rate = tsa_info.rx_fs_rate;
> info->rx_bit_rate = tsa_info.rx_bit_rate;
> @@ -280,6 +284,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct
> qmc_chan_info *info)
> info->tx_bit_rate = tsa_info.tx_bit_rate;
> info->nb_rx_ts = hweight64(chan->rx_ts_mask);
>
> + spin_unlock_irqrestore(&chan->ts_lock, flags);
> +
> return 0;
> }
I would normally use spin_lock_irq() instead of spin_lock_irqsave()
in functions that are only called outside of atomic context.
> +static int qmc_chan_start_rx(struct qmc_chan *chan);
> +
> int qmc_chan_stop(struct qmc_chan *chan, int direction)
> {
...
> -static void qmc_chan_start_rx(struct qmc_chan *chan)
> +static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan);
> +
> +static int qmc_chan_start_rx(struct qmc_chan *chan)
> {
Can you reorder the static functions in a way that avoids the
forward declarations?
Arnd
Hi Arnd,
On Wed, 29 Nov 2023 15:03:02 +0100
"Arnd Bergmann" <arnd@arndb.de> wrote:
> On Tue, Nov 28, 2023, at 15:08, Herve Codina wrote:
> > @@ -272,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct
> > qmc_chan_info *info)
> > if (ret)
> > return ret;
> >
> > + spin_lock_irqsave(&chan->ts_lock, flags);
> > +
> > info->mode = chan->mode;
> > info->rx_fs_rate = tsa_info.rx_fs_rate;
> > info->rx_bit_rate = tsa_info.rx_bit_rate;
> > @@ -280,6 +284,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct
> > qmc_chan_info *info)
> > info->tx_bit_rate = tsa_info.tx_bit_rate;
> > info->nb_rx_ts = hweight64(chan->rx_ts_mask);
> >
> > + spin_unlock_irqrestore(&chan->ts_lock, flags);
> > +
> > return 0;
> > }
>
> I would normally use spin_lock_irq() instead of spin_lock_irqsave()
> in functions that are only called outside of atomic context.
I would prefer to keep spin_lock_irqsave() here.
This function is part of the API, so it's quite difficult to ensure
that all calls (current and future) will be done outside of an atomic
context.
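For illustration, a minimal sketch of the difference (the two helper
names below are made up for this example, not part of the patch):
spin_unlock_irq() unconditionally re-enables interrupts, so the _irq
variant is only safe when the caller is known to run with interrupts
enabled, whereas the irqsave/irqrestore pair restores whatever IRQ
state the caller had:

	/* Safe from any context: saves and restores the caller's IRQ state. */
	static void hypothetical_any_context(struct qmc_chan *chan)
	{
		unsigned long flags;

		spin_lock_irqsave(&chan->ts_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&chan->ts_lock, flags);
	}

	/* Only safe when interrupts are known to be enabled on entry. */
	static void hypothetical_process_context_only(struct qmc_chan *chan)
	{
		spin_lock_irq(&chan->ts_lock);
		/* ... critical section ... */
		spin_unlock_irq(&chan->ts_lock);
	}

If the second variant were ever called with interrupts already
disabled, spin_unlock_irq() would re-enable them behind the caller's
back, which is exactly the concern for an exported entry point such as
qmc_chan_get_info().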
>
> > +static int qmc_chan_start_rx(struct qmc_chan *chan);
> > +
> > int qmc_chan_stop(struct qmc_chan *chan, int direction)
> > {
> ...
> > -static void qmc_chan_start_rx(struct qmc_chan *chan)
> > +static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan);
> > +
> > +static int qmc_chan_start_rx(struct qmc_chan *chan)
> > {
>
> Can you reorder the static functions in a way that avoids the
> forward declarations?
Yes, sure.
I will do that in the next iteration.
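For what it's worth, a rough sketch of the intended layout (function
bodies elided, not the actual driver code): defining
qmc_setup_chan_trnsync() and qmc_chan_start_rx() before their first
callers removes the need for both forward declarations:

	/* Helpers defined first ... */
	static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
	{
		/* ... */
		return 0;
	}

	static int qmc_chan_start_rx(struct qmc_chan *chan)
	{
		/* ... calls qmc_setup_chan_trnsync() ... */
		return 0;
	}

	/* ... then the callers that need them for rollback. */
	int qmc_chan_stop(struct qmc_chan *chan, int direction)
	{
		/* ... may call qmc_chan_start_rx() on error ... */
		return 0;
	}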
Thanks for the review,
Best regards,
Hervé
@@ -177,6 +177,7 @@ struct qmc_chan {
struct qmc *qmc;
void __iomem *s_param;
enum qmc_mode mode;
+ spinlock_t ts_lock; /* Protect timeslots */
u64 tx_ts_mask_avail;
u64 tx_ts_mask;
u64 rx_ts_mask_avail;
@@ -265,6 +266,7 @@ static void qmc_setbits32(void __iomem *addr, u32 set)
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
struct tsa_serial_info tsa_info;
+ unsigned long flags;
int ret;
/* Retrieve info from the TSA related serial */
@@ -272,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
if (ret)
return ret;
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
info->mode = chan->mode;
info->rx_fs_rate = tsa_info.rx_fs_rate;
info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -280,6 +284,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
info->tx_bit_rate = tsa_info.tx_bit_rate;
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
@@ -683,6 +689,40 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
return 0;
}
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
static int qmc_chan_setup_tsa(struct qmc_chan *chan, bool enable)
{
struct tsa_serial_info info;
@@ -719,6 +759,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->is_rx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP RECEIVE command */
ret = qmc_chan_command(chan, 0x0);
if (ret) {
@@ -729,6 +775,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
chan->is_rx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+ ret = qmc_chan_setup_tsa_rx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
return ret;
@@ -741,6 +796,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (chan->is_tx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP TRANSMIT command */
ret = qmc_chan_command(chan, 0x1);
if (ret) {
@@ -751,37 +812,82 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
chan->is_tx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+ ret = qmc_chan_setup_tsa_tx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
return ret;
}
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
- int ret;
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = !chan->is_rx_stopped;
ret = qmc_chan_stop_rx(chan);
if (ret)
- return ret;
+ goto end;
}
if (direction & QMC_CHAN_WRITE) {
ret = qmc_chan_stop_tx(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /* Restart rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_start_rx(chan);
+ goto end;
+ }
}
- return 0;
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan);
+
+static int qmc_chan_start_rx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (!chan->is_rx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_rx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/* Restart the receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
@@ -792,15 +898,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan)
chan->is_rx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
}
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (!chan->is_tx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_tx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/*
* Enable channel transmitter as it could be disabled if
* qmc_chan_reset() was called.
@@ -812,18 +941,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
chan->is_tx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
}
int qmc_chan_start(struct qmc_chan *chan, int direction)
{
- if (direction & QMC_CHAN_READ)
- qmc_chan_start_rx(chan);
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
- if (direction & QMC_CHAN_WRITE)
- qmc_chan_start_tx(chan);
+ spin_lock_irqsave(&chan->ts_lock, flags);
- return 0;
+ if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = chan->is_rx_stopped;
+ ret = qmc_chan_start_rx(chan);
+ if (ret)
+ goto end;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_start_tx(chan);
+ if (ret) {
+ /* Restop rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_stop_rx(chan);
+ goto end;
+ }
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
@@ -992,6 +1142,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
}
chan->id = chan_id;
+ spin_lock_init(&chan->ts_lock);
spin_lock_init(&chan->rx_lock);
spin_lock_init(&chan->tx_lock);
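As a usage note on the rollback added in qmc_chan_stop() and
qmc_chan_start() above: a partial failure leaves the channel in its
previous state, so a hypothetical caller (not part of this patch) can
simply propagate the error:

	/* Hypothetical consumer code, error paths trimmed to the essentials. */
	ret = qmc_chan_stop(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
	if (ret)
		return ret;	/* RX is restarted internally if stopping TX failed */

	/* ... reconfigure the channel while both directions are stopped ... */

	ret = qmc_chan_start(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
	if (ret)
		return ret;	/* RX is re-stopped internally if starting TX failed */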