1
0
Fork 0
forked from nuttx/nuttx-update

Experimental change to STM32 Ethernet driver a success. Porting change to all other Ethernet drivers.

This commit is contained in:
Gregory Nutt 2017-03-03 14:45:09 -06:00
parent 47ebe1e320
commit 86239d4a73
24 changed files with 170 additions and 970 deletions

View file

@ -1,7 +1,7 @@
/****************************************************************************
* arch/arm/src/c5471/c5471_ethernet.c
*
* Copyright (C) 2007, 2009-2010, 2014-2015 Gregory Nutt. All rights reserved.
* Copyright (C) 2007, 2009-2010, 2014-2015, 2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Based on a C5471 Linux driver and released under this BSD license with
@ -312,7 +312,8 @@ struct c5471_driver_s
bool c_bifup; /* true:ifup false:ifdown */
WDOG_ID c_txpoll; /* TX poll timer */
WDOG_ID c_txtimeout; /* TX timeout timer */
struct work_s c_work; /* For deferring work to the work queue */
struct work_s c_irqwork; /* For deferring interrupt work to the work queue */
struct work_s c_pollwork; /* For deferring poll work to the work queue */
/* Note: According to the C547x documentation: "The software has to maintain
* two pointers to the current RX-CPU and TX-CPU descriptors. At init time,
@ -1660,13 +1661,9 @@ static int c5471_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->c_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->c_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->c_work, c5471_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->c_irqwork, c5471_interrupt_work, priv, 0);
return OK;
}
@ -1740,15 +1737,11 @@ static void c5471_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(C5471_IRQ_ETHER);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
/* Schedule to perform the TX timeout processing on the worker thread,
* canceling any pending IRQ work.
*/
work_cancel(ETHWORK, &priv->c_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->c_work, c5471_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->c_irqwork, c5471_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1813,25 +1806,9 @@ static void c5471_poll_expiry(int argc, wdparm_t arg, ...)
{
struct c5471_driver_s *priv = (struct c5471_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->c_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->c_work, c5471_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->c_txpoll, C5471_WDDELAY, c5471_poll_expiry,
1, arg);
}
work_queue(ETHWORK, &priv->c_pollwork, c5471_poll_work, priv, 0);
}
/****************************************************************************
@ -2023,11 +2000,11 @@ static int c5471_txavail(FAR struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->c_work))
if (work_available(&priv->c_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->c_work, c5471_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->c_pollwork, c5471_txavail_work, priv, 0);
}
return OK;

View file

@ -225,7 +225,8 @@ struct kinetis_driver_s
uint8_t phyaddr; /* Selected PHY address */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
struct enet_desc_s *txdesc; /* A pointer to the list of TX descriptor */
struct enet_desc_s *rxdesc; /* A pointer to the list of RX descriptors */
@ -946,13 +947,9 @@ static int kinetis_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, kinetis_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, kinetis_interrupt_work, priv, 0);
return OK;
}
@ -1028,15 +1025,11 @@ static void kinetis_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(KINETIS_IRQ_EMACRX);
up_disable_irq(KINETIS_IRQ_EMACMISC);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
/* Schedule to perform the TX timeout processing on the worker thread,
* canceling any pending interrupt work.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, kinetis_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, kinetis_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1104,25 +1097,9 @@ static void kinetis_polltimer_expiry(int argc, uint32_t arg, ...)
{
FAR struct kinetis_driver_s *priv = (FAR struct kinetis_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the poll processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, kinetis_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, KINETIS_WDDELAY, kinetis_polltimer_expiry,
1, (wdparm_t)arg);
}
work_queue(ETHWORK, &priv->pollwork, kinetis_poll_work, priv, 0);
}
/****************************************************************************
@ -1380,11 +1357,11 @@ static int kinetis_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, kinetis_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, kinetis_txavail_work, priv, 0);
}
return OK;

View file

@ -1221,11 +1221,9 @@ static int lpc17_interrupt(int irq, void *context, FAR void *arg)
priv->lp_inten &= ~ETH_RXINTS;
lpc17_putreg(priv->lp_inten, LPC17_ETH_INTEN);
/* Cancel any pending RX done work */
work_cancel(ETHWORK, &priv->lp_rxwork);
/* Schedule RX-related work to be performed on the work thread */
/* Schedule RX-related work to be performed on the work thread,
* perhaps cancelling any pending RX work.
*/
work_queue(ETHWORK, &priv->lp_rxwork, (worker_t)lpc17_rxdone_work,
priv, 0);
@ -1262,8 +1260,6 @@ static int lpc17_interrupt(int irq, void *context, FAR void *arg)
if ((status & ETH_INT_TXDONE) != 0)
{
int delay;
NETDEV_TXDONE(&priv->lp_dev);
/* A packet transmission just completed */
@ -1285,31 +1281,10 @@ static int lpc17_interrupt(int irq, void *context, FAR void *arg)
work_cancel(ETHWORK, &priv->lp_txwork);
/* Check if the poll timer is running. If it is not, then
* start it now. There is a race condition here: We may test
* the time remaining on the poll timer and determine that it
* is still running, but then the timer expires immediately.
* That should not be a problem, however; the poll timer
* processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
/* Schedule TX-related work to be performed on the work thread,
* perhaps cancelling any pending TX work.
*/
delay = wd_gettime(priv->lp_txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is
* necessary to avoid certain race conditions where the
* polling sequence can be interrupted.
*/
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY,
lpc17_poll_expiry, 1, priv);
}
/* Schedule TX-related work to be performed on the work thread */
work_queue(ETHWORK, &priv->lp_txwork, (worker_t)lpc17_txdone_work,
priv, 0);
}
@ -1496,24 +1471,9 @@ static void lpc17_poll_expiry(int argc, uint32_t arg, ...)
DEBUGASSERT(arg);
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->lp_pollwork))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->lp_pollwork, lpc17_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->lp_txpoll, LPC17_WDDELAY, lpc17_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->lp_pollwork, lpc17_poll_work, priv, 0);
}
/****************************************************************************

View file

@ -519,7 +519,8 @@ struct lpc43_ethmac_s
uint8_t fduplex : 1; /* Full (vs. half) duplex */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring work to the work queue */
struct work_s pollwork; /* For deferring work to the work queue */
/* This holds the information visible to the NuttX network */
@ -1862,33 +1863,10 @@ static void lpc43_txdone(FAR struct lpc43_ethmac_s *priv)
if (priv->inflight <= 0)
{
int delay;
/* Cancel the TX timeout */
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, LPC43_WDDELAY, lpc43_poll_expiry,
1, priv);
}
/* And disable further TX interrupts. */
lpc43_disableint(priv, ETH_DMAINT_TI);
@ -2048,13 +2026,9 @@ static int lpc43_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, lpc43_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, lpc43_interrupt_work, priv, 0);
}
return OK;
@ -2129,15 +2103,11 @@ static void lpc43_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(LPC43M4_IRQ_ETHERNET);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
/* Schedule to perform the TX timeout processing on the worker thread,
* perhaps cancelling any pending IRQ processing.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, lpc43_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, lpc43_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2234,25 +2204,9 @@ static void lpc43_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct lpc43_ethmac_s *priv = (FAR struct lpc43_ethmac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, lpc43_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, LPC43_WDDELAY, lpc43_poll_expiry, 1,
(uint32_t)priv);
}
work_queue(ETHWORK, &priv->pollwork, lpc43_poll_work, priv, 0);
}
/****************************************************************************
@ -2421,11 +2375,11 @@ static int lpc43_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, lpc43_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, lpc43_txavail_work, priv, 0);
}
return OK;

View file

@ -270,7 +270,8 @@ struct sam_emac_s
uint8_t ifup : 1; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -1638,44 +1639,17 @@ static int sam_emac_interrupt(int irq, void *context, FAR void *arg)
tsr = sam_getreg(priv, SAM_EMAC_TSR);
if ((tsr & EMAC_TSR_TXCOMP) != 0)
{
int delay;
/* If a TX transfer just completed, then cancel the TX timeout so
* there will be no race condition between any subsequent timeout
* expiration and the deferred interrupt processing.
*/
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry,
1, priv);
}
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_interrupt_work, priv, 0);
return OK;
}
@ -1746,15 +1720,9 @@ static void sam_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(SAM_IRQ_EMAC);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1819,24 +1787,9 @@ static void sam_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct sam_emac_s *priv = (FAR struct sam_emac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->pollwork, sam_poll_work, priv, 0);
}
/****************************************************************************
@ -2027,11 +1980,11 @@ static int sam_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, sam_txavail_work, priv, 0);
}
return OK;

View file

@ -275,7 +275,8 @@ struct sam_emac_s
uint8_t ifup : 1; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -1676,44 +1677,17 @@ static int sam_emac_interrupt(int irq, void *context, FAR void *arg)
tsr = sam_getreg(priv, SAM_EMAC_TSR_OFFSET);
if ((tsr & EMAC_TSR_COMP) != 0)
{
int delay;
/* If a TX transfer just completed, then cancel the TX timeout so
* there will be no race condition between any subsequent timeout
* expiration and the deferred interrupt processing.
*/
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry,
1, priv);
}
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_interrupt_work, priv, 0);
return OK;
}
@ -1782,15 +1756,9 @@ static void sam_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(SAM_IRQ_EMAC);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1855,24 +1823,9 @@ static void sam_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct sam_emac_s *priv = (FAR struct sam_emac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->pollwork, sam_poll_work, priv, 0);
}
/****************************************************************************
@ -2063,11 +2016,11 @@ static int sam_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, sam_txavail_work, priv, 0);
}
return OK;

View file

@ -412,7 +412,8 @@ struct sam_emac_s
uint8_t ifup : 1; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -2037,44 +2038,17 @@ static int sam_emac_interrupt(int irq, void *context, FAR void *arg)
tsr = sam_getreg(priv, SAM_EMAC_TSR_OFFSET);
if ((tsr & EMAC_TSR_TXCOMP) != 0)
{
int delay;
/* If a TX transfer just completed, then cancel the TX timeout so
* there will be no race condition between any subsequent timeout
* expiration and the deferred interrupt processing.
*/
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry,
1, priv);
}
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_interrupt_work, priv, 0);
return OK;
}
@ -2143,15 +2117,9 @@ static void sam_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(priv->attr->irq);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2216,24 +2184,9 @@ static void sam_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct sam_emac_s *priv = (FAR struct sam_emac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->pollwork, sam_poll_work, priv, 0);
}
/****************************************************************************
@ -2432,11 +2385,11 @@ static int sam_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, sam_txavail_work, priv, 0);
}
return OK;

View file

@ -201,7 +201,8 @@ struct sam_gmac_s
uint8_t ifup : 1; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -1628,43 +1629,17 @@ static int sam_gmac_interrupt(int irq, void *context, FAR void *arg)
tsr = sam_getreg(priv, SAM_GMAC_TSR_OFFSET);
if ((tsr & GMAC_TSR_TXCOMP) != 0)
{
int delay;
/* If a TX transfer just completed, then cancel the TX timeout so
* there will be no race condition between any subsequent timeout
* expiration and the deferred interrupt processing.
*/
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, priv);
}
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_interrupt_work, priv, 0);
return OK;
}
@ -1733,15 +1708,9 @@ static void sam_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(SAM_IRQ_GMAC);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1806,24 +1775,9 @@ static void sam_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct sam_gmac_s *priv = (FAR struct sam_gmac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->pollwork, sam_poll_work, priv, 0);
}
/****************************************************************************
@ -2017,11 +1971,11 @@ static int sam_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, sam_txavail_work, priv, 0);
}
return OK;

View file

@ -517,7 +517,8 @@ struct sam_emac_s
uint8_t ifup : 1; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring work to the work queue */
struct work_s pollwork; /* For deferring work to the work queue */
/* This holds the information visible to the NuttX network */
@ -2483,44 +2484,17 @@ static int sam_emac_interrupt(int irq, void *context, FAR void *arg)
tsr = sam_getreg(priv, SAM_EMAC_TSR_OFFSET);
if ((tsr & EMAC_TSR_TXCOMP) != 0)
{
int delay;
/* If a TX transfer just completed, then cancel the TX timeout so
* there will be no race condition between any subsequent timeout
* expiration and the deferred interrupt processing.
*/
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immediately. That should not be a problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry,
1, priv);
}
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_interrupt_work, priv, 0);
return OK;
}
@ -2591,15 +2565,9 @@ static void sam_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(priv->attr->irq);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, sam_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2664,24 +2632,9 @@ static void sam_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct sam_emac_s *priv = (FAR struct sam_emac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, SAM_WDDELAY, sam_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->pollwork, sam_poll_work, priv, 0);
}
/****************************************************************************
@ -2883,11 +2836,11 @@ static int sam_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, sam_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, sam_txavail_work, priv, 0);
}
return OK;

View file

@ -2172,14 +2172,10 @@ static void stm32_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(STM32_IRQ_ETH);
/* Cancel any pending interrupt work. This will have no effect on work that
* has already been started.
/* Schedule to perform the TX timeout processing on the worker thread,
* perhaps canceling any pending IRQ processing.
*/
work_cancel(ETHWORK, &priv->irqwork);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->irqwork, stm32_txtimeout_work, priv, 0);
}
@ -2277,24 +2273,9 @@ static void stm32_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct stm32_ethmac_s *priv = (FAR struct stm32_ethmac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->pollwork))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->pollwork, stm32_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, STM32_WDDELAY, stm32_poll_expiry, 1, (uint32_t)priv);
}
work_queue(ETHWORK, &priv->pollwork, stm32_poll_work, priv, 0);
}
/****************************************************************************

View file

@ -607,7 +607,8 @@ struct stm32_ethmac_s
uint8_t intf; /* Ethernet interface number */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -2040,33 +2041,10 @@ static void stm32_txdone(struct stm32_ethmac_s *priv)
if (priv->inflight <= 0)
{
int delay;
/* Cancel the TX timeout */
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immiately. That should not be problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, STM32_WDDELAY, stm32_poll_expiry,
1, priv);
}
/* And disable further TX interrupts. */
stm32_disableint(priv, ETH_DMAINT_TI);
@ -2229,13 +2207,9 @@ static int stm32_interrupt(int irq, void *context, FAR void *arg)
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, stm32_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, stm32_interrupt_work, priv, 0);
}
return OK;
@ -2308,15 +2282,9 @@ static void stm32_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(STM32_IRQ_ETH);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, stm32_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, stm32_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2413,24 +2381,9 @@ static void stm32_poll_expiry(int argc, uint32_t arg, ...)
{
struct stm32_ethmac_s *priv = (struct stm32_ethmac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, stm32_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, STM32_WDDELAY, stm32_poll_expiry, 1, (uint32_t)priv);
}
work_queue(ETHWORK, &priv->pollwork, stm32_poll_work, priv, 0);
}
/****************************************************************************
@ -2599,11 +2552,11 @@ static int stm32_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, stm32_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, stm32_txavail_work, priv, 0);
}
return OK;

View file

@ -202,7 +202,8 @@ struct tiva_driver_s
bool ld_bifup; /* true:ifup false:ifdown */
WDOG_ID ld_txpoll; /* TX poll timer */
WDOG_ID ld_txtimeout; /* TX timeout timer */
struct work_s ld_work; /* For deferring work to the work queue */
struct work_s ld_irqwork; /* For deferring interrupt work to the work queue */
struct work_s ld_pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -1093,13 +1094,9 @@ static int tiva_interrupt(int irq, void *context, FAR void *arg)
wd_cancel(priv->ld_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->ld_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->ld_work, tiva_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->ld_irqwork, tiva_interrupt_work, priv, 0);
return OK;
}
@ -1176,15 +1173,9 @@ static void tiva_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(TIVA_IRQ_ETHCON);
#endif
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->ld_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->ld_work, tiva_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->ld_irqwork, tiva_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1256,25 +1247,9 @@ static void tiva_poll_expiry(int argc, wdparm_t arg, ...)
{
struct tiva_driver_s *priv = (struct tiva_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->ld_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->ld_work, tiva_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->ld_txpoll, TIVA_WDDELAY, tiva_poll_expiry,
1, arg);
}
work_queue(ETHWORK, &priv->ld_pollwork, tiva_poll_work, priv, 0);
}
/****************************************************************************
@ -1587,11 +1562,11 @@ static int tiva_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->ld_work))
if (work_available(&priv->ld_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->ld_work, tiva_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->ld_pollwork, tiva_txavail_work, priv, 0);
}
return OK;

View file

@ -626,7 +626,9 @@ struct tiva_ethmac_s
uint8_t fduplex : 1; /* Full (vs. half) duplex */
WDOG_ID txpoll; /* TX poll timer */
WDOG_ID txtimeout; /* TX timeout timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s irqwork; /* For deferring interrupt work to the work queue */
struct work_s pollwork; /* For deferring poll work to the work queue */
#ifdef CONFIG_TIVA_PHY_INTERRUPTS
xcpt_t handler; /* Attached PHY interrupt handler */
void *arg; /* Argument that accompanies the interrupt */
@ -1956,33 +1958,10 @@ static void tiva_txdone(FAR struct tiva_ethmac_s *priv)
if (priv->inflight <= 0)
{
int delay;
/* Cancel the TX timeout */
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immiately. That should not be problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, TIVA_WDDELAY, tiva_poll_expiry,
1, (uint32_t)priv);
}
/* And disable further TX interrupts. */
tiva_disableint(priv, EMAC_DMAINT_TI);
@ -2146,13 +2125,9 @@ static int tiva_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, tiva_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, tiva_interrupt_work, priv, 0);
}
#ifdef CONFIG_TIVA_PHY_INTERRUPTS
@ -2243,15 +2218,9 @@ static void tiva_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(TIVA_IRQ_ETHCON);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->work, tiva_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->irqwork, tiva_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2349,25 +2318,9 @@ static void tiva_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->work, tiva_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, TIVA_WDDELAY, tiva_poll_expiry,
1, (uint32_t)priv);
}
work_queue(ETHWORK, &priv->pollwork, tiva_poll_work, priv, 0);
}
/****************************************************************************
@ -2537,11 +2490,11 @@ static int tiva_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->work))
if (work_available(&priv->pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->work, tiva_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pollwork, tiva_txavail_work, priv, 0);
}
return OK;

View file

@ -1,7 +1,7 @@
/****************************************************************************
* arch/mips/src/pic32mx/pic32mx_ethernet.c
*
* Copyright (C) 2012, 2014-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2012, 2014-2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* This driver derives from the PIC32MX Ethernet Driver
@ -321,7 +321,8 @@ struct pic32mx_driver_s
uint32_t pd_inten; /* Shadow copy of INTEN register */
WDOG_ID pd_txpoll; /* TX poll timer */
WDOG_ID pd_txtimeout; /* TX timeout timer */
struct work_s pd_work; /* For deferring work to the work queue */
struct work_s pd_irqwork; /* For deferring interrupt work to the work queue */
struct work_s pd_pollwork; /* For deferring poll work to the work queue */
sq_queue_t pd_freebuffers; /* The free buffer list */
@ -1891,13 +1892,9 @@ static int pic32mx_interrupt(int irq, void *context, FAR void *arg)
wd_cancel(priv->pd_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(HPWORK, &priv->pd_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mx_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->pd_irqwork, pic32mx_interrupt_work, priv, 0);
return OK;
}
@ -1978,15 +1975,9 @@ static void pic32mx_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(PIC32MX_IRQSRC_ETH);
#endif
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->pd_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mx_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->pd_irqwork, pic32mx_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2054,25 +2045,9 @@ static void pic32mx_poll_expiry(int argc, wdparm_t arg, ...)
{
struct pic32mx_driver_s *priv = (struct pic32mx_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->pd_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mx_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->pd_txpoll, PIC32MX_WDDELAY, pic32mx_poll_expiry,
1, arg);
}
work_queue(ETHWORK, &priv->pd_pollwork, pic32mx_poll_work, priv, 0);
}
/****************************************************************************
@ -2491,11 +2466,11 @@ static int pic32mx_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->pd_work))
if (work_available(&priv->pd_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mx_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pd_pollwork, pic32mx_txavail_work, priv, 0);
}
return OK;

View file

@ -1,7 +1,7 @@
/****************************************************************************
* arch/mips/src/pic32mz/pic32mz_ethernet.c
*
* Copyright (C) 2015-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2015-2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* This driver derives from the PIC32MZ Ethernet Driver
@ -348,7 +348,8 @@ struct pic32mz_driver_s
uint32_t pd_inten; /* Shadow copy of INTEN register */
WDOG_ID pd_txpoll; /* TX poll timer */
WDOG_ID pd_txtimeout; /* TX timeout timer */
struct work_s pd_work; /* For deferring work to the work queue */
struct work_s pd_irqwork; /* For deferring interrupt work to the work queue */
struct work_s pd_pollwork; /* For deferring poll work to the work queue */
sq_queue_t pd_freebuffers; /* The free buffer list */
@ -1918,13 +1919,9 @@ static int pic32mz_interrupt(int irq, void *context, FAR void *arg)
wd_cancel(priv->pd_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(HPWORK, &priv->pd_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mz_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->pd_irqwork, pic32mz_interrupt_work, priv, 0);
return OK;
}
@ -2005,15 +2002,9 @@ static void pic32mz_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(PIC32MZ_IRQ_ETH);
#endif
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->pd_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mz_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->pd_irqwork, pic32mz_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -2081,25 +2072,9 @@ static void pic32mz_poll_expiry(int argc, wdparm_t arg, ...)
{
struct pic32mz_driver_s *priv = (struct pic32mz_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->pd_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mz_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->pd_txpoll, PIC32MZ_WDDELAY, pic32mz_poll_expiry,
1, arg);
}
work_queue(ETHWORK, &priv->pd_pollwork, pic32mz_poll_work, priv, 0);
}
/****************************************************************************
@ -2524,11 +2499,11 @@ static int pic32mz_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->pd_work))
if (work_available(&priv->pd_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->pd_work, pic32mz_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->pd_pollwork, pic32mz_txavail_work, priv, 0);
}
return OK;

View file

@ -117,7 +117,8 @@ struct misoc_net_driver_s
bool misoc_net_bifup; /* true:ifup false:ifdown */
WDOG_ID misoc_net_txpoll; /* TX poll timer */
WDOG_ID misoc_net_txtimeout; /* TX timeout timer */
struct work_s misoc_net_work; /* For deferring work to the work queue */
struct work_s misoc_net_irqwork; /* For deferring interrupt work to the work queue */
struct work_s misoc_net_pollwork; /* For deferring poll work to the work queue */
uint8_t *rx0_buf; /* 2 RX and 2 TX buffer */
uint8_t *rx1_buf;
@ -542,8 +543,6 @@ static void misoc_net_receive(FAR struct misoc_net_driver_s *priv)
static void misoc_net_txdone(FAR struct misoc_net_driver_s *priv)
{
int delay;
/* Check for errors and update statistics */
NETDEV_TXDONE(priv->misoc_net_dev);
@ -556,26 +555,6 @@ static void misoc_net_txdone(FAR struct misoc_net_driver_s *priv)
wd_cancel(priv->misoc_net_txtimeout);
/* Check if the poll timer is running. If it is not, then start it now.
* There is a race condition here: We may test the time remaining on the
* poll timer and determine that it is still running, but then the timer
* expires immiately. That should not be problem, however, the poll timer
* processing should be in the work queue and should execute immediately
* after we complete the TX poll. Inefficient, but not fatal.
*/
delay = wd_gettime(priv->misoc_net_txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary to
* avoid certain race conditions where the polling sequence can be
* interrupted.
*/
(void)wd_start(priv->misoc_net_txpoll, MISOC_NET_WDDELAY,
misoc_net_poll_expiry, 1, (wdparm_t)priv);
}
/* And disable further TX interrupts. */
ethmac_sram_reader_ev_enable_write(0);
@ -673,13 +652,9 @@ static int misoc_net_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->misoc_net_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(HPWORK, &priv->misoc_net_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(HPWORK, &priv->misoc_net_work, misoc_net_interrupt_work, priv, 0);
work_queue(HPWORK, &priv->misoc_net_irqwork, misoc_net_interrupt_work, priv, 0);
return OK;
}
@ -747,15 +722,9 @@ static void misoc_net_txtimeout_expiry(int argc, wdparm_t arg, ...)
//up_disable_irq(ETHMAC_INTERRUPT);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(HPWORK, &priv->misoc_net_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(HPWORK, &priv->misoc_net_work, misoc_net_txtimeout_work, priv, 0);
work_queue(HPWORK, &priv->misoc_net_irqwork, misoc_net_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -824,25 +793,9 @@ static void misoc_net_poll_expiry(int argc, wdparm_t arg, ...)
{
FAR struct misoc_net_driver_s *priv = (FAR struct misoc_net_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->misoc_net_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(HPWORK, &priv->misoc_net_work, misoc_net_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->misoc_net_txpoll, MISOC_NET_WDDELAY,
misoc_net_poll_expiry, 1, arg);
}
work_queue(HPWORK, &priv->misoc_net_pollwork, misoc_net_poll_work, priv, 0);
}
/****************************************************************************
@ -1012,11 +965,11 @@ static int misoc_net_txavail(FAR struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->misoc_net_work))
if (work_available(&priv->misoc_net_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(HPWORK, &priv->misoc_net_work, misoc_net_txavail_work, priv, 0);
work_queue(HPWORK, &priv->misoc_net_pollwork, misoc_net_txavail_work, priv, 0);
}
return OK;

View file

@ -1,7 +1,7 @@
/****************************************************************************
* arch/z80/src/ez80/ez80_emac.c
*
* Copyright (C) 2009-2010, 2012, 2014-2016 Gregory Nutt. All rights reserved.
* Copyright (C) 2009-2010, 2012, 2014-2017 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* References:
@ -1815,10 +1815,6 @@ static int ez80emac_sysinterrupt(int irq, FAR void *context, FAR void *arg)
up_disable_irq(EZ80_EMACSYS_IRQ);
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->syswork);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->syswork, ez80emac_sysinterrupt_work, priv, 0);
@ -1899,12 +1895,6 @@ static void ez80emac_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(EZ80_EMACTX_IRQ);
/* Cancel any pending poll or Tx interrupt work. This will have no
* effect on work that has already been started.
*/
work_cancel(ETHWORK, &priv->txwork);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->txwork, ez80emac_txtimeout_work, priv, 0);

View file

@ -321,7 +321,8 @@ struct dm9x_driver_s
uint8_t ncrxpackets; /* Number of continuous rx packets */
WDOG_ID dm_txpoll; /* TX poll timer */
WDOG_ID dm_txtimeout; /* TX timeout timer */
struct work_s dm_work; /* For deferring work to the work queue */
struct work_s dm_irqwork; /* For deferring interrupt work to the work queue */
struct work_s dm_pollwork; /* For deferring poll work to the work queue */
/* Mode-dependent function to move data in 8/16/32 I/O modes */
@ -1267,13 +1268,9 @@ static int dm9x_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->dm_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->dm_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->dm_work, dm9x_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->dm_irqwork, dm9x_interrupt_work, priv, 0);
return OK;
}
@ -1351,15 +1348,9 @@ static void dm9x_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(CONFIG_DM9X_IRQ);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->dm_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->dm_work, dm9x_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->dm_irqwork, dm9x_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -1437,25 +1428,9 @@ static void dm9x_poll_expiry(int argc, wdparm_t arg, ...)
{
FAR struct dm9x_driver_s *priv = (FAR struct dm9x_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->dm_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->dm_work, dm9x_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->dm_txpoll, DM9X_WDDELAY, dm9x_poll_expiry,
1, arg);
}
work_queue(ETHWORK, &priv->dm_pollwork, dm9x_poll_work, priv, 0);
}
/****************************************************************************
@ -1687,11 +1662,11 @@ static int dm9x_txavail(FAR struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->dm_work))
if (work_available(&priv->dm_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->dm_work, dm9x_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->dm_pollwork, dm9x_txavail_work, priv, 0);
}
return OK;

View file

@ -1275,8 +1275,6 @@ static void enc_linkstatus(FAR struct enc_driver_s *priv)
static void enc_txif(FAR struct enc_driver_s *priv)
{
int delay;
/* Update statistics */
NETDEV_TXDONE(&priv->dev);
@ -1289,26 +1287,6 @@ static void enc_txif(FAR struct enc_driver_s *priv)
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it now.
* There is a race condition here: We may test the time remaining on the
* poll timer and determine that it is still running, but then the timer
* expires immiately. That should not be problem, however, the poll timer
* processing should be in the work queue and should execute immediately
* after we complete the TX poll. Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary to
* avoid certain race conditions where the polling sequence can be
* interrupted.
*/
(void)wd_start(priv->txpoll, ENC_WDDELAY, enc_polltimer, 1,
(wdparm_t)priv);
}
/* Then poll the network for new XMIT data */
(void)devif_poll(&priv->dev, enc_txpoll);

View file

@ -1291,33 +1291,10 @@ static void enc_txif(FAR struct enc_driver_s *priv)
if (sq_empty(&priv->txqueue))
{
int delay;
/* If no further xmits are pending, then cancel the TX timeout */
wd_cancel(priv->txtimeout);
/* Check if the poll timer is running. If it is not, then start it
* now. There is a race condition here: We may test the time
* remaining on the poll timer and determine that it is still running,
* but then the timer expires immiately. That should not be problem,
* however, the poll timer processing should be in the work queue and
* should execute immediately after we complete the TX poll.
* Inefficient, but not fatal.
*/
delay = wd_gettime(priv->txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary
* to avoid certain race conditions where the polling sequence can
* be interrupted.
*/
(void)wd_start(priv->txpoll, ENC_WDDELAY, enc_polltimer, 1,
(wdparm_t)priv);
}
/* Poll for TX packets from the networking layer */
devif_poll(&priv->dev, enc_txpoll);

View file

@ -174,7 +174,8 @@ struct ftmac100_driver_s
WDOG_ID ft_txpoll; /* TX poll timer */
WDOG_ID ft_txtimeout; /* TX timeout timer */
unsigned int status; /* Last ISR status */
struct work_s ft_work; /* For deferring work to the work queue */
struct work_s ft_irqwork; /* For deferring work to the work queue */
struct work_s ft_pollwork; /* For deferring work to the work queue */
/* This holds the information visible to the NuttX network */
@ -805,7 +806,6 @@ static void ftmac100_receive(FAR struct ftmac100_driver_s *priv)
static void ftmac100_txdone(FAR struct ftmac100_driver_s *priv)
{
FAR struct ftmac100_txdes_s *txdes;
int delay;
/* Check if a Tx was pending */
@ -844,26 +844,6 @@ static void ftmac100_txdone(FAR struct ftmac100_driver_s *priv)
wd_cancel(priv->ft_txtimeout);
/* Check if the poll timer is running. If it is not, then start it now.
* There is a race condition here: We may test the time remaining on the
* poll timer and determine that it is still running, but then the timer
* expires immiately. That should not be problem, however, the poll timer
* processing should be in the work queue and should execute immediately
* after we complete the TX poll. Inefficient, but not fatal.
*/
delay = wd_gettime(priv->ft_txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary to
* avoid certain race conditions where the polling sequence can be
* interrupted.
*/
(void)wd_start(priv->ft_txpoll, FTMAC100_WDDELAY, ftmac100_poll_expiry,
1, (wdparm_t)priv);
}
/* Then poll the network for new XMIT data */
(void)devif_poll(&priv->ft_dev, ftmac100_txpoll);
@ -1021,13 +1001,9 @@ static int ftmac100_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->ft_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(FTMAWORK, &priv->ft_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(FTMAWORK, &priv->ft_work, ftmac100_interrupt_work, priv, 0);
work_queue(FTMAWORK, &priv->ft_irqwork, ftmac100_interrupt_work, priv, 0);
return OK;
}
@ -1095,16 +1071,11 @@ static void ftmac100_txtimeout_expiry(int argc, uint32_t arg, ...)
up_disable_irq(CONFIG_FTMAC100_IRQ);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(FTMAWORK, &priv->ft_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(FTMAWORK, &priv->ft_work, ftmac100_txtimeout_work, priv, 0);
work_queue(FTMAWORK, &priv->ft_irqwork, ftmac100_txtimeout_work, priv, 0);
}
/****************************************************************************
* Function: ftmac100_poll_work
*
@ -1170,25 +1141,9 @@ static void ftmac100_poll_expiry(int argc, uint32_t arg, ...)
{
FAR struct ftmac100_driver_s *priv = (FAR struct ftmac100_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->ft_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(FTMAWORK, &priv->ft_work, ftmac100_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->ft_txpoll, FTMAC100_WDDELAY, ftmac100_poll_expiry,
1, (wdparm_t)arg);
}
work_queue(FTMAWORK, &priv->ft_pollwork, ftmac100_poll_work, priv, 0);
}
/****************************************************************************
@ -1365,11 +1320,11 @@ static int ftmac100_txavail(struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->ft_work))
if (work_available(&priv->ft_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(FTMAWORK, &priv->ft_work, ftmac100_txavail_work, priv, 0);
work_queue(FTMAWORK, &priv->ft_pollwork, ftmac100_txavail_work, priv, 0);
}
return OK;

View file

@ -103,7 +103,7 @@ struct lo_driver_s
bool lo_bifup; /* true:ifup false:ifdown */
bool lo_txdone; /* One RX packet was looped back */
WDOG_ID lo_polldog; /* TX poll timer */
struct work_s lo_work; /* For deferring work to the work queue */
struct work_s lo_work; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -283,24 +283,9 @@ static void lo_poll_expiry(int argc, wdparm_t arg, ...)
{
FAR struct lo_driver_s *priv = (FAR struct lo_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->lo_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(LPBKWORK, &priv->lo_work, lo_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->lo_polldog, LO_WDDELAY, lo_poll_expiry, 1, arg);
}
work_queue(LPBKWORK, &priv->lo_work, lo_poll_work, priv, 0);
}
/****************************************************************************

View file

@ -116,7 +116,8 @@ struct skel_driver_s
bool sk_bifup; /* true:ifup false:ifdown */
WDOG_ID sk_txpoll; /* TX poll timer */
WDOG_ID sk_txtimeout; /* TX timeout timer */
struct work_s sk_work; /* For deferring work to the work queue */
struct work_s sk_irqwork; /* For deferring interupt work to the work queue */
struct work_s sk_pollwork; /* For deferring poll work to the work queue */
/* This holds the information visible to the NuttX network */
@ -477,26 +478,6 @@ static void skel_txdone(FAR struct skel_driver_s *priv)
wd_cancel(priv->sk_txtimeout);
/* Check if the poll timer is running. If it is not, then start it now.
* There is a race condition here: We may test the time remaining on the
* poll timer and determine that it is still running, but then the timer
* expires immiately. That should not be problem, however, the poll timer
* processing should be in the work queue and should execute immediately
* after we complete the TX poll. Inefficient, but not fatal.
*/
delay = wd_gettime(priv->sk_txpoll);
if (delay <= 0)
{
/* The poll timer is not running .. restart it. This is necessary to
* avoid certain race conditions where the polling sequence can be
* interrupted.
*/
(void)wd_start(priv->sk_txpoll, skeleton_WDDELAY, skel_poll_expiry,
1, (wdparm_t)priv);
}
/* And disable further TX interrupts. */
/* In any event, poll the network for new TX data */
@ -588,13 +569,9 @@ static int skel_interrupt(int irq, FAR void *context, FAR void *arg)
wd_cancel(priv->sk_txtimeout);
}
/* Cancel any pending poll work */
work_cancel(ETHWORK, &priv->sk_work);
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->sk_work, skel_interrupt_work, priv, 0);
work_queue(ETHWORK, &priv->sk_irqwork, skel_interrupt_work, priv, 0);
return OK;
}
@ -662,15 +639,9 @@ static void skel_txtimeout_expiry(int argc, wdparm_t arg, ...)
up_disable_irq(CONFIG_skeleton_IRQ);
/* Cancel any pending poll or interrupt work. This will have no effect
* on work that has already been started.
*/
work_cancel(ETHWORK, &priv->sk_work);
/* Schedule to perform the TX timeout processing on the worker thread. */
work_queue(ETHWORK, &priv->sk_work, skel_txtimeout_work, priv, 0);
work_queue(ETHWORK, &priv->sk_irqwork, skel_txtimeout_work, priv, 0);
}
/****************************************************************************
@ -758,24 +729,9 @@ static void skel_poll_expiry(int argc, wdparm_t arg, ...)
{
FAR struct skel_driver_s *priv = (FAR struct skel_driver_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the interrupt processing on the worker thread. */
if (work_available(&priv->sk_work))
{
/* Schedule to perform the interrupt processing on the worker thread. */
work_queue(ETHWORK, &priv->sk_work, skel_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->sk_txpoll, skeleton_WDDELAY, skel_poll_expiry, 1, arg);
}
work_queue(ETHWORK, &priv->sk_pollwork, skel_poll_work, priv, 0);
}
/****************************************************************************
@ -940,11 +896,11 @@ static int skel_txavail(FAR struct net_driver_s *dev)
* availability action.
*/
if (work_available(&priv->sk_work))
if (work_available(&priv->sk_pollwork))
{
/* Schedule to serialize the poll on the worker thread. */
work_queue(ETHWORK, &priv->sk_work, skel_txavail_work, priv, 0);
work_queue(ETHWORK, &priv->sk_pollwork, skel_txavail_work, priv, 0);
}
return OK;

View file

@ -115,7 +115,7 @@ struct tun_device_s
{
bool bifup; /* true:ifup false:ifdown */
WDOG_ID txpoll; /* TX poll timer */
struct work_s work; /* For deferring work to the work queue */
struct work_s work; /* For deferring poll work to the work queue */
FAR struct file *filep;
@ -591,24 +591,9 @@ static void tun_poll_expiry(int argc, wdparm_t arg, ...)
{
FAR struct tun_device_s *priv = (FAR struct tun_device_s *)arg;
/* Is our single work structure available? It may not be if there are
* pending interrupt actions.
*/
/* Schedule to perform the timer expiration on the worker thread. */
if (work_available(&priv->work))
{
/* Schedule to perform the timer expiration on the worker thread. */
work_queue(TUNWORK, &priv->work, tun_poll_work, priv, 0);
}
else
{
/* No.. Just re-start the watchdog poll timer, missing one polling
* cycle.
*/
(void)wd_start(priv->txpoll, TUN_WDDELAY, tun_poll_expiry, 1, arg);
}
work_queue(TUNWORK, &priv->work, tun_poll_work, priv, 0);
}
/****************************************************************************