/* if_dmc.c 6.4 84/12/20 */
/*
 * DMC11 device driver, internet version
 */
/* #define DEBUG */	/* for base table dump on fatal error */
#include "../machine/pte.h"
#include "ioctl.h" /* must precede tty.h */
#include "../net/netisr.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../netinet/in_systm.h"
#include "../netinet/ip.h"
#include "../netinet/ip_var.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"
/* Watchdog bookkeeping: dmctimer is nonzero once the dmcwatch timer runs. */
int	dmctimer;		/* timer started? */
/* Seconds of interrupt silence tolerated before the unit is declared hung. */
int	dmc_timeout = 8;	/* timeout value */
/*
 * Driver information for auto-configuration stuff.
 */
/*
 * Auto-configuration glue: driver entry points, the per-unit device
 * table, standard csr address list, and the UBA driver descriptor.
 */
int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int	dmcoutput(), dmcreset();
struct	uba_device *dmcinfo[NDMC];
u_short	dmcstd[] = { 0 };
struct	uba_driver dmcdriver = {
	dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo
};
#define NTOT (NRCV + NXMT)	/* total buffers: receive plus transmit rings */
#define NCMDS (NTOT+4)	/* size of command queue */
/*
 * Debug printf.  Written "if (!dmcdebug) ; else printf" rather than the
 * obvious "if (dmcdebug) printf" so that an "else" following a printd()
 * statement binds to this if (which already has an else) instead of
 * being silently captured -- the classic dangling-else macro hazard.
 * Expansion behavior is otherwise identical: printf runs iff dmcdebug.
 */
#define printd if(!dmcdebug);else printf
/* error reporting intervals */
char qp_cmd
; /* command */
short qp_ubaddr
; /* buffer address */
short qp_cc
; /* character count || XMEM */
struct dmc_command
*qp_next
; /* next command on queue */
/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
struct ifrw x_ifrw
; /* mapping info */
struct pte x_map
[IF_MAXNUBAMR
]; /* output base pages */
short x_xswapd
; /* mask of clusters swapped */
struct mbuf
*x_xtofree
; /* pages being dma'd out */
short ifu_uban
; /* uba number */
short ifu_hlen
; /* local net header length */
struct uba_regs
*ifu_uba
; /* uba regs, in vm */
struct ifrw ifu_r
[NRCV
]; /* receive information */
struct ifxmt ifu_w
[NXMT
]; /* transmit information */
/* these should only be pointers */
short ifu_flags
; /* used during uballoc's */
int ubinfo
; /* from uballoc */
short cc
; /* buffer size */
short flags
; /* access control */
/*
 * dmcbufs.flags values.  DBUF_DMCS is set while a buffer is checked out
 * (claimed by the device or another party); DBUF_OURS -- zero, i.e. the
 * DBUF_DMCS bit clear -- means the driver may reuse the buffer.
 * DBUF_XMIT and DBUF_RCV record which ring the buffer belongs to.
 */
#define DBUF_OURS 0 /* buffer is available */
#define DBUF_DMCS 1 /* buffer claimed by somebody */
#define DBUF_XMIT 4 /* transmit buffer */
#define DBUF_RCV 8 /* receive buffer */
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface structures
 * that contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
short sc_oused
; /* output buffers currently in use */
short sc_iused
; /* input buffers given to DMC */
short sc_flag
; /* flags */
int sc_nticks
; /* seconds since last interrupt */
struct ifnet sc_if
; /* network-visible interface */
struct dmcbufs sc_rbufs
[NRCV
]; /* receive buffer info */
struct dmcbufs sc_xbufs
[NXMT
]; /* transmit buffer info */
struct dmcuba sc_ifuba
; /* UNIBUS resources */
int sc_ubinfo
; /* UBA mapping info for base table */
int sc_errors
[4]; /* non-fatal error counters */
/*
 * Symbolic names for the four non-fatal error counters kept in
 * sc_errors[]: data check, timeout, no buffer, and disconnect
 * (accumulated by the soft-error statistics code in the transmit
 * interrupt handler).
 */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc sc_errors[3]
/* command queue stuff */
struct dmc_command sc_cmdbuf
[NCMDS
];
struct dmc_command
*sc_qhead
; /* head of command queue */
struct dmc_command
*sc_qtail
; /* tail of command queue */
struct dmc_command
*sc_qactive
; /* command in progress */
struct dmc_command
*sc_qfreeh
; /* head of list of free cmd buffers */
struct dmc_command
*sc_qfreet
; /* tail of list of free cmd buffers */
/* end command queue stuff */
/*
 * sc_flag bits.  These form a bit mask, so each flag must occupy its
 * own bit: octal 01, 02, 04, 010.  The last one was written "08",
 * which is not a valid octal constant (the digit 8 may not follow a
 * leading 0); the intended next bit -- decimal 8 -- is octal 010.
 */
#define DMC_ALLOC	01	/* unibus resources allocated */
#define DMC_BMAPPED	02	/* base table mapped */
#define DMC_RESTART	04	/* software restart in progress */
#define DMC_ACTIVE	010	/* device active */
short d_base
[128]; /* DMC base table */
/* queue manipulation macros */
#define QUEUE_AT_HEAD(qp, head, tail) \
(qp)->qp_next = (head); \
if ((tail) == (struct dmc_command *) 0) \
#define QUEUE_AT_TAIL(qp, head, tail) \
(tail)->qp_next = (qp); \
(qp)->qp_next = (struct dmc_command *) 0; \
#define DEQUEUE(head, tail) \
(head) = (head)->qp_next;\
if ((head) == (struct dmc_command *) 0)\
register struct dmcdevice
*addr
= (struct dmcdevice
*)reg
;
br
= 0; cvec
= br
; br
= cvec
;
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
if ((addr
->bsel1
& DMC_RUN
) == 0) {
printf("dmcprobe: can't start device\n" );
addr
->bsel0
= DMC_RQI
|DMC_IEI
;
addr
->bsel0
|= DMC_RQI
|DMC_IEI
;
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready.
 */
register struct uba_device
*ui
;
register struct dmc_softc
*sc
= &dmc_softc
[ui
->ui_unit
];
sc
->sc_if
.if_unit
= ui
->ui_unit
;
sc
->sc_if
.if_name
= "dmc";
sc
->sc_if
.if_mtu
= DMCMTU
;
sc
->sc_if
.if_init
= dmcinit
;
sc
->sc_if
.if_output
= dmcoutput
;
sc
->sc_if
.if_ioctl
= dmcioctl
;
sc
->sc_if
.if_reset
= dmcreset
;
sc
->sc_if
.if_flags
= IFF_POINTOPOINT
;
sc
->sc_ifuba
.ifu_flags
= UBA_CANTWAIT
;
timeout(dmcwatch
, (caddr_t
) 0, hz
);
/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
register struct uba_device
*ui
;
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
if (unit
>= NDMC
|| (ui
= dmcinfo
[unit
]) == 0 || ui
->ui_alive
== 0 ||
/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
register struct uba_device
*ui
= dmcinfo
[unit
];
register struct dmcdevice
*addr
;
register struct ifnet
*ifp
= &sc
->sc_if
;
register struct ifrw
*ifrw
;
register struct ifxmt
*ifxp
;
register struct dmcbufs
*rp
;
register struct dmc_command
*qp
;
addr
= (struct dmcdevice
*)ui
->ui_addr
;
sin
= (struct sockaddr_in
*) &ifp
->if_addr
;
if (sin
->sin_addr
.s_addr
== 0) /* if address still unknown */
sin
= (struct sockaddr_in
*) &ifp
->if_dstaddr
;
if (sin
->sin_addr
.s_addr
== 0) /* if address still unknown */
if ((addr
->bsel1
&DMC_RUN
) == 0) {
printf("dmcinit: DMC not running\n");
ifp
->if_flags
&= ~(IFF_RUNNING
|IFF_UP
);
if ((sc
->sc_flag
& DMC_BMAPPED
) == 0) {
sc
->sc_ubinfo
= uballoc(ui
->ui_ubanum
,
(caddr_t
)&dmc_base
[unit
], sizeof (struct dmc_base
), 0);
sc
->sc_flag
|= DMC_BMAPPED
;
/* initialize UNIBUS resources */
sc
->sc_iused
= sc
->sc_oused
= 0;
if ((sc
->sc_flag
& DMC_ALLOC
) == 0) {
if (dmc_ubainit(&sc
->sc_ifuba
, ui
->ui_ubanum
,
sizeof(struct dmc_header
), (int)btoc(DMCMTU
)) == 0) {
printf("dmc%d: can't initialize\n", unit
);
ifp
->if_flags
&= ~IFF_UP
;
sc
->sc_flag
|= DMC_ALLOC
;
/* initialize buffer pool */
ifrw
= &sc
->sc_ifuba
.ifu_r
[0];
for (rp
= &sc
->sc_rbufs
[0]; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
rp
->ubinfo
= ifrw
->ifrw_info
& 0x3ffff;
rp
->cc
= DMCMTU
+ sizeof (struct dmc_header
);
rp
->flags
= DBUF_OURS
|DBUF_RCV
;
ifxp
= &sc
->sc_ifuba
.ifu_w
[0];
for (rp
= &sc
->sc_xbufs
[0]; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++) {
rp
->ubinfo
= ifxp
->x_ifrw
.ifrw_info
& 0x3ffff;
rp
->flags
= DBUF_OURS
|DBUF_XMIT
;
/* set up command queues */
sc
->sc_qfreeh
= sc
->sc_qfreet
= sc
->sc_qhead
= sc
->sc_qtail
= sc
->sc_qactive
=
/* set up free command buffer list */
for (qp
= &sc
->sc_cmdbuf
[0]; qp
< &sc
->sc_cmdbuf
[NCMDS
]; qp
++) {
QUEUE_AT_HEAD(qp
, sc
->sc_qfreeh
, sc
->sc_qfreet
);
base
= sc
->sc_ubinfo
& 0x3ffff;
dmcload(sc
, DMC_BASEI
, base
, (base
>>2) & DMC_XMEM
);
/* specify half duplex operation, flags tell if primary */
/* or secondary station */
/* use DDMCP mode in full duplex */
dmcload(sc
, DMC_CNTLI
, 0, 0);
else if (ui
->ui_flags
== 1)
/* use MAINTENENCE mode */
dmcload(sc
, DMC_CNTLI
, 0, DMC_MAINT
);
else if (ui
->ui_flags
== 2)
/* use DDCMP half duplex as primary station */
dmcload(sc
, DMC_CNTLI
, 0, DMC_HDPLX
);
else if (ui
->ui_flags
== 3)
/* use DDCMP half duplex as secondary station */
dmcload(sc
, DMC_CNTLI
, 0, DMC_HDPLX
| DMC_SEC
);
/* enable operation done interrupts */
sc
->sc_flag
&= ~DMC_ACTIVE
;
while ((addr
->bsel2
& DMC_IEO
) == 0)
/* queue first NRCV buffers for DMC to fill */
for (rp
= &sc
->sc_rbufs
[0]; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
dmcload(sc
, DMC_READ
, rp
->ubinfo
,
(((rp
->ubinfo
>>2)&DMC_XMEM
) | rp
->cc
));
ifp
->if_flags
|= IFF_UP
|IFF_RUNNING
;
/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5.
 */
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
register struct dmcbufs
*rp
;
* Dequeue up to NXMT requests and map them to the UNIBUS.
* If no more requests, or no dmc buffers available, just return.
for (rp
= &sc
->sc_xbufs
[0]; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++ ) {
/* find an available buffer */
if ((rp
->flags
& DBUF_DMCS
) == 0) {
IF_DEQUEUE(&sc
->sc_if
.if_snd
, m
);
rp
->flags
|= (DBUF_DMCS
);
* Have request mapped to UNIBUS for transmission
rp
->cc
= dmcput(&sc
->sc_ifuba
, n
, m
);
dmcload(sc
, DMC_WRITE
, rp
->ubinfo
,
rp
->cc
| ((rp
->ubinfo
>>2)&DMC_XMEM
));
/*
 * Utility routine to load the DMC device registers.
 */
dmcload(sc
, type
, w0
, w1
)
register struct dmc_softc
*sc
;
register struct dmcdevice
*addr
;
register struct dmc_command
*qp
;
addr
= (struct dmcdevice
*)dmcinfo
[unit
]->ui_addr
;
/* grab a command buffer from the free list */
if ((qp
= sc
->sc_qfreeh
) == (struct dmc_command
*)0)
panic("dmc command queue overflow");
DEQUEUE(sc
->sc_qfreeh
, sc
->sc_qfreet
);
/* fill in requested info */
qp
->qp_cmd
= (type
| DMC_RQI
);
if (sc
->sc_qactive
) { /* command in progress */
QUEUE_AT_HEAD(qp
, sc
->sc_qhead
, sc
->sc_qtail
);
QUEUE_AT_TAIL(qp
, sc
->sc_qhead
, sc
->sc_qtail
);
} else { /* command port free */
addr
->bsel0
= qp
->qp_cmd
;
/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
register struct dmc_softc
*sc
;
register struct dmcdevice
*addr
;
register struct dmc_command
*qp
;
addr
= (struct dmcdevice
*)dmcinfo
[unit
]->ui_addr
;
if ((qp
= sc
->sc_qactive
) == (struct dmc_command
*) 0) {
printf("dmc%d: dmcrint no command\n", unit
);
while (addr
->bsel0
&DMC_RDYI
) {
addr
->sel4
= qp
->qp_ubaddr
;
addr
->bsel0
&= ~(DMC_IEI
|DMC_RQI
);
/* free command buffer */
QUEUE_AT_HEAD(qp
, sc
->sc_qfreeh
, sc
->sc_qfreet
);
while (addr
->bsel0
& DMC_RDYI
) {
* Can't check for RDYO here 'cause
* this routine isn't reentrant!
/* move on to next command */
if ((sc
->sc_qactive
= sc
->sc_qhead
) == (struct dmc_command
*)0)
/* more commands to do, start the next one */
DEQUEUE(sc
->sc_qhead
, sc
->sc_qtail
);
addr
->bsel0
= qp
->qp_cmd
;
if ((addr
->bsel0
&DMC_RDYI
) || (addr
->bsel2
&DMC_RDYO
))
addr
->bsel0
|= DMC_IEI
|DMC_RQI
;
/* VMS does it twice !*$%@# */
addr
->bsel0
|= DMC_IEI
|DMC_RQI
;
/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed; check for errors.
 * If it was a read, notify the appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
register struct dmc_softc
*sc
;
register struct ifnet
*ifp
;
struct uba_device
*ui
= dmcinfo
[unit
];
int arg
, pkaddr
, cmd
, len
;
register struct ifrw
*ifrw
;
register struct dmcbufs
*rp
;
register struct ifxmt
*ifxp
;
addr
= (struct dmcdevice
*)ui
->ui_addr
;
while (addr
->bsel2
& DMC_RDYO
) {
cmd
= addr
->bsel2
& 0xff;
arg
= addr
->sel6
& 0xffff;
/* reconstruct UNIBUS address of buffer returned to us */
pkaddr
= ((arg
&DMC_XMEM
)<<2) | (addr
->sel4
& 0xffff);
addr
->bsel2
&= ~DMC_RDYO
;
* Pass packet to type specific
* higher-level input routine.
/* find location in dmcuba struct */
ifrw
= &sc
->sc_ifuba
.ifu_r
[0];
for (rp
= &sc
->sc_rbufs
[0]; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
if (rp
>= &sc
->sc_rbufs
[NRCV
])
if ((rp
->flags
& DBUF_DMCS
) == 0)
printf("dmc%d: done unalloc rbuf\n", unit
);
len
= (arg
& DMC_CCOUNT
) - sizeof (struct dmc_header
);
if (len
< 0 || len
> DMCMTU
) {
printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
* Deal with trailer protocol: if type is trailer
* get true type from first 16-bit word past data.
* Remember that type was trailer by setting off.
dh
= (struct dmc_header
*)ifrw
->ifrw_addr
;
dh
->dmc_type
= ntohs((u_short
)dh
->dmc_type
);
#define dmcdataaddr(dh, off, type) ((type)(((caddr_t)((dh)+1)+(off))))
if (dh
->dmc_type
>= DMC_TRAILER
&&
dh
->dmc_type
< DMC_TRAILER
+DMC_NTRAILER
) {
off
= (dh
->dmc_type
- DMC_TRAILER
) * 512;
dh
->dmc_type
= ntohs(*dmcdataaddr(dh
, off
, u_short
*));
resid
= ntohs(*(dmcdataaddr(dh
, off
+2, u_short
*)));
/*
 * Pull packet off interface.  Off is nonzero if
 * packet has trailing header; dmc_get will then
 * force this header information to be at the front,
 * but we still have to drop the type and length
 * which are at the front of any trailer data.
 */
m
= dmc_get(&sc
->sc_ifuba
, ifrw
, len
, off
);
m
->m_off
+= 2 * sizeof (u_short
);
m
->m_len
-= 2 * sizeof (u_short
);
rp
->ubinfo
= ifrw
->ifrw_info
& 0x3ffff;
dmcload(sc
, DMC_READ
, rp
->ubinfo
,
((rp
->ubinfo
>> 2) & DMC_XMEM
) | rp
->cc
);
* A write has completed, start another
* transfer if there is more data to send.
/* find associated dmcbuf structure */
ifxp
= &sc
->sc_ifuba
.ifu_w
[0];
for (rp
= &sc
->sc_xbufs
[0]; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++) {
if (rp
>= &sc
->sc_xbufs
[NXMT
]) {
printf("dmc%d: bad packet address 0x%x\n",
if ((rp
->flags
& DBUF_DMCS
) == 0)
printf("dmc%d: unallocated packet 0x%x\n",
(void)m_freem(ifxp
->x_xtofree
);
sc
->sc_flag
|= DMC_ACTIVE
;
printd("dmc%d: fatal error, flags=%b\n",
ifp
->if_flags
&= ~(IFF_RUNNING
|IFF_UP
);
/* ACCUMULATE STATISTICS */
if ((sc
->sc_nobuf
++ % DMC_RPNBFS
) == 0)
if ((sc
->sc_disc
++ % DMC_RPDSC
) == 0)
if ((sc
->sc_timeo
++ % DMC_RPTMO
) == 0)
if ((sc
->sc_datck
++ % DMC_RPDCK
) == 0)
printd("dmc%d: soft error, flags=%b\n", unit
,
if ((sc
->sc_flag
& DMC_RESTART
) == 0) {
* kill off the dmc to get things
* going again by generating a
sc
->sc_flag
|= DMC_RESTART
;
arg
= sc
->sc_ubinfo
& 0x3ffff;
dmcload(sc
, DMC_BASEI
, arg
, (arg
>>2)&DMC_XMEM
);
printf("dmc%d: bad control %o\n", unit
, cmd
);
/*
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
register struct ifnet
*ifp
;
register struct mbuf
*m0
;
register struct mbuf
*m
= m0
;
register struct dmc_header
*dh
;
switch (dst
->sa_family
) {
off
= ntohs((u_short
)mtod(m
, struct ip
*)->ip_len
) - m
->m_len
;
if ((ifp
->if_flags
& IFF_NOTRAILERS
) == 0)
if (off
> 0 && (off
& 0x1ff) == 0 &&
m
->m_off
>= MMINOFF
+ 2 * sizeof (u_short
)) {
type
= DMC_TRAILER
+ (off
>>9);
m
->m_off
-= 2 * sizeof (u_short
);
m
->m_len
+= 2 * sizeof (u_short
);
*mtod(m
, u_short
*) = htons((u_short
)DMC_IPTYPE
);
*(mtod(m
, u_short
*) + 1) = htons((u_short
)m
->m_len
);
dh
= (struct dmc_header
*)dst
->sa_data
;
printf("dmc%d: can't handle af%d\n", ifp
->if_unit
,
* Packet to be sent as a trailer; move first packet
* (control information) to end of chain.
* Add local network header
* (there is space for a uba on a vax to step on)
if (m
->m_off
> MMAXOFF
||
MMINOFF
+ sizeof(struct dmc_header
) > m
->m_off
) {
m
= m_get(M_DONTWAIT
, MT_HEADER
);
m
->m_len
= sizeof (struct dmc_header
);
m
->m_off
-= sizeof (struct dmc_header
);
m
->m_len
+= sizeof (struct dmc_header
);
dh
= mtod(m
, struct dmc_header
*);
dh
->dmc_type
= htons((u_short
)type
);
* Queue message on interface, and start output if interface
if (IF_QFULL(&ifp
->if_snd
)) {
IF_ENQUEUE(&ifp
->if_snd
, m
);
* Process an ioctl request.
register struct ifnet
*ifp
;
struct ifreq
*ifr
= (struct ifreq
*)data
;
int s
= splimp(), error
= 0;
sin
= (struct sockaddr_in
*)&ifr
->ifr_addr
;
if (sin
->sin_family
!= AF_INET
)
if (ifp
->if_flags
& IFF_RUNNING
)
if_rtinit(ifp
, -1); /* delete previous route */
ifp
->if_addr
= *(struct sockaddr
*)sin
;
ifp
->if_net
= in_netof(sin
->sin_addr
);
/* set up routing table entry */
if ((ifp
->if_flags
& IFF_ROUTE
) == 0) {
rtinit(&ifp
->if_dstaddr
, &ifp
->if_addr
, RTF_HOST
|RTF_UP
);
ifp
->if_flags
|= IFF_ROUTE
;
ifp
->if_dstaddr
= ifr
->ifr_dstaddr
;
if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
/*
 * Routines supporting UNIBUS network interfaces.
 *
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
dmc_ubainit(ifu
, uban
, hlen
, nmr
)
register struct dmcuba
*ifu
;
register struct ifrw
*ifrw
;
register struct ifxmt
*ifxp
;
ncl
= clrnd(nmr
+ CLSIZE
) / CLSIZE
;
if (ifu
->ifu_r
[0].ifrw_addr
)
* If the first read buffer has a non-zero
* address, it means we have already allocated core
cp
= ifu
->ifu_r
[0].ifrw_addr
- (CLBYTES
- hlen
);
cp
= m_clalloc(NTOT
* ncl
, MPG_SPACE
);
ifu
->ifu_uba
= uba_hd
[uban
].uh_uba
;
dp
= cp
+ CLBYTES
- hlen
;
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++) {
for (ifxp
= ifu
->ifu_w
; ifxp
< &ifu
->ifu_w
[NXMT
]; ifxp
++) {
ifxp
->x_ifrw
.ifrw_addr
= dp
;
/* allocate for receive ring */
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++) {
if (dmc_ubaalloc(ifu
, ifrw
, nmr
) == 0) {
for (rw
= ifu
->ifu_r
; rw
< ifrw
; rw
++)
ubarelse(ifu
->ifu_uban
, &rw
->ifrw_info
);
/* and now transmit ring */
for (ifxp
= ifu
->ifu_w
; ifxp
< &ifu
->ifu_w
[NXMT
]; ifxp
++) {
if (dmc_ubaalloc(ifu
, ifrw
, nmr
) == 0) {
for (xp
= ifu
->ifu_w
; xp
< ifxp
; xp
++)
ubarelse(ifu
->ifu_uban
, &xp
->x_ifrw
.ifrw_info
);
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++)
ubarelse(ifu
->ifu_uban
, &ifrw
->ifrw_info
);
for (i
= 0; i
< nmr
; i
++)
ifxp
->x_map
[i
] = ifrw
->ifrw_mr
[i
];
m_pgfree(cp
, NTOT
* ncl
);
ifu
->ifu_r
[0].ifrw_addr
= 0;
* Setup either a ifrw structure by allocating UNIBUS map registers,
* possibly a buffered data path, and initializing the fields of
* the ifrw structure to minimize run-time overhead.
dmc_ubaalloc(ifu
, ifrw
, nmr
)
register struct ifrw
*ifrw
;
uballoc(ifu
->ifu_uban
, ifrw
->ifrw_addr
, nmr
*NBPG
+ ifu
->ifu_hlen
,
ifrw
->ifrw_bdp
= UBAI_BDP(info
);
ifrw
->ifrw_proto
= UBAMR_MRV
| (UBAI_BDP(info
) << UBAMR_DPSHIFT
);
ifrw
->ifrw_mr
= &ifu
->ifu_uba
->uba_map
[UBAI_MR(info
) + 1];
/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
dmc_get(ifu
, ifrw
, totlen
, off0
)
register struct dmcuba
*ifu
;
register struct ifrw
*ifrw
;
struct mbuf
*top
, **mp
, *m
;
register caddr_t cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
;
MGET(m
, M_DONTWAIT
, MT_DATA
);
cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
+ off
;
len
= m
->m_len
= CLBYTES
;
m
->m_off
= (int)p
- (int)m
;
* Switch pages mapped to UNIBUS with new page p,
* as quick form of copy. Remap UNIBUS and invalidate.
cpte
= &Mbmap
[mtocl(cp
)*CLSIZE
];
ppte
= &Mbmap
[mtocl(p
)*CLSIZE
];
x
= btop(cp
- ifrw
->ifrw_addr
);
ip
= (int *)&ifrw
->ifrw_mr
[x
];
for (i
= 0; i
< CLSIZE
; i
++) {
t
= *ppte
; *ppte
++ = *cpte
; *cpte
= t
;
cpte
++->pg_pfnum
|ifrw
->ifrw_proto
;
m
->m_len
= MIN(MLEN
, len
);
bcopy(cp
, mtod(m
, caddr_t
), (unsigned)m
->m_len
);
/* sort of an ALGOL-W style for statement... */
cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
;
* Map a chain of mbufs onto a network interface
* in preparation for an i/o operation.
* The argument chain of mbufs includes the local network
* header which is copied to be in the mapped, aligned
register struct mbuf
*mp
;
register struct ifxmt
*ifxp
;
register struct ifrw
*ifrw
;
if (claligned(cp
) && claligned(dp
) && m
->m_len
== CLBYTES
) {
struct pte
*pte
; int *ip
;
pte
= &Mbmap
[mtocl(dp
)*CLSIZE
];
x
= btop(cp
- ifrw
->ifrw_addr
);
ip
= (int *)&ifrw
->ifrw_mr
[x
];
for (i
= 0; i
< CLSIZE
; i
++)
*ip
++ = ifrw
->ifrw_proto
| pte
++->pg_pfnum
;
xswapd
|= 1 << (x
>>(CLSHIFT
-PGSHIFT
));
m
->m_next
= ifxp
->x_xtofree
;
bcopy(mtod(m
, caddr_t
), cp
, (unsigned)m
->m_len
);
* Xswapd is the set of clusters we just mapped out. Ifxp->x_xswapd
* is the set of clusters mapped out from before. We compute
* the number of clusters involved in this operation in x.
* Clusters mapped out before and involved in this operation
* should be unmapped so original pages will be accessed by the device.
cc
= cp
- ifrw
->ifrw_addr
;
x
= ((cc
- ifu
->ifu_hlen
) + CLBYTES
- 1) >> CLSHIFT
;
ifxp
->x_xswapd
&= ~xswapd
;
while (i
= ffs(ifxp
->x_xswapd
)) {
ifxp
->x_xswapd
&= ~(1<<i
);
for (t
= 0; t
< CLSIZE
; t
++) {
ifrw
->ifrw_mr
[i
] = ifxp
->x_map
[i
];
ifxp
->x_xswapd
|= xswapd
;
/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
register struct uba_device
*ui
= dmcinfo
[unit
];
register struct dmcdevice
*addr
;
register struct ifxmt
*ifxp
;
addr
= (struct dmcdevice
*)ui
->ui_addr
;
printf("dmc%d base table:\n", unit
);
for (i
= 0; i
< sizeof (struct dmc_base
); i
++)
printf("%o\n" ,dmc_base
[unit
].d_base
[i
]);
* Let the DMR finish the MCLR. At 1 Mbit, it should do so
* in about a max of 6.4 milliseconds with diagnostics enabled.
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
/* Did the timer expire or did the DMR finish? */
if ((addr
->bsel1
& DMC_RUN
) == 0) {
printf("dmc%d: M820 Test Failed\n", unit
);
#ifdef notdef /* tef sez why throw these packets away??? */
IF_DEQUEUE(&sc
->sc_if
.if_snd
, m
);
IF_DEQUEUE(&sc
->sc_if
.if_snd
, m
);
for (ifxp
= ifu
->ifu_w
; ifxp
< &ifu
->ifu_w
[NXMT
]; ifxp
++) {
(void) m_freem(ifxp
->x_xtofree
);
sc
->sc_flag
&= ~DMC_RESTART
;
sc
->sc_if
.if_collisions
++; /* why not? */
/*
 * Check to see that transmitted packets don't
 * lose interrupts.  The device has to be active.
 */
register struct uba_device
*ui
;
register struct dmc_softc
*sc
;
for (i
= 0; i
< NDMC
; i
++) {
if ((sc
->sc_flag
& DMC_ACTIVE
) == 0)
if ((ui
= dmcinfo
[i
]) == 0 || ui
->ui_alive
== 0)
if (sc
->sc_nticks
> dmc_timeout
) {
addr
= (struct dmcdevice
*)ui
->ui_addr
;
printd("dmc%d hung: bsel0=%b bsel2=%b\n", i
,
addr
->bsel0
& 0xff, DMC0BITS
,
addr
->bsel2
& 0xff, DMC2BITS
);
timeout(dmcwatch
, (caddr_t
) 0, hz
);