/* if_dmc.c 6.3 84/09/27 */
/*
 * Debug print macro: printd(...) expands to "if (dmcdebug) printf(...)".
 * NOTE(review): the bare "if" with no braces means a printd() placed
 * immediately before an "else" would capture that else — call sites
 * must keep printd() inside a braced block when used under an if.
 */
#define printd if(dmcdebug)printf
/*
 * DMC11 device driver, internet version
 *	Bill Nesheim (bill@cornell.arpa or {vax135,uw-beaver,ihnp4}!bill)
 *	Department of Computer Science
 *
 * Based loosly on 4.2BSD release
 * The UNIBUS support routines were taken from Lou Salkind's DEUNA driver
 *
 * TODO:
 *	generalize unibus routines
 *	add timeout to mark interface down when other end of link dies
 *	figure out better way to check for completed buffers
 *	(not critical with DMC, only 7 bufs, but may cause problems ...)
 */
#include "../machine/pte.h"
#include "ioctl.h" /* must precede tty.h */
#include "../net/netisr.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../netinet/in_systm.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"
/* Driver information for auto-configuration stuff. */
/* forward declarations of the driver entry points */
int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int	dmcoutput(), dmcreset();

struct	uba_device *dmcinfo[NDMC];	/* per-unit autoconf records */
u_short	dmcstd[] = { 0 };		/* no standard CSR addresses */
struct	uba_driver dmcdriver =
	{ dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };
/* as long as we use clists for command queues, we only have 28 bytes to use! */
/* DMC-11 only has 7 buffers; DMR-11 has 64 */
/*
 * Buffer-pool sizing: NXMT transmit buffers are derived from NRCV
 * (presumably #defined in a lost local header — TODO confirm), keeping
 * two fewer transmit than receive buffers; NTOT is the combined total.
 */
#define NXMT (NRCV - 2) /* avoid running out of buffers on recv end */
#define NTOT (NRCV + NXMT)
/* error reporting intervals */
/*
 * A DMC device command: written to the device command port by dmcload()
 * and queued through the QUEUE_AT_* / DEQUEUE macros below.
 * NOTE(review): the "struct dmc_command { ... };" wrapper was lost in
 * the mangled source and has been restored from the fields' usage.
 */
struct dmc_command {
	char	qp_cmd;			/* command */
	short	qp_ubaddr;		/* buffer address */
	short	qp_cc;			/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};
/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
struct ifrw x_ifrw
; /* mapping imfo */
struct pte x_map
[IF_MAXNUBAMR
]; /* output base pages */
short x_xswapd
; /* mask of clusters swapped */
struct mbuf
*x_xtofree
; /* pages being dma'd out */
/*
 * UNIBUS resources held by one DMC interface: the adapter number and
 * register window, plus the per-buffer mapping state for the receive
 * and transmit rings.
 * NOTE(review): the "struct dmcuba { ... };" wrapper was lost in the
 * mangled source; restored from sc_ifuba's usage in dmc_ubainit et al.
 */
struct dmcuba {
	short	ifu_uban;		/* uba number */
	short	ifu_hlen;		/* local net header length */
	struct	uba_regs *ifu_uba;	/* uba regs, in vm */
	struct	ifrw ifu_r[NRCV];	/* receive information */
	struct	ifxmt ifu_w[NXMT];	/* transmit information */
	/* these should only be pointers */
	short	ifu_flags;		/* used during uballoc's */
};
/*
 * Bookkeeping for one device buffer (entries of sc_rbufs/sc_xbufs):
 * the UNIBUS mapping handle, the byte count, and ownership flags.
 * NOTE(review): the "struct dmcbufs { ... };" wrapper was lost in the
 * mangled source; restored from the rp->ubinfo/cc/flags references.
 */
struct dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};

/* values for the flags field above */
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface structures
 * which contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
short sc_oused
; /* output buffers currently in use */
short sc_iused
; /* input buffers given to DMC */
short sc_flag
; /* flags */
struct ifnet sc_if
; /* network-visible interface */
struct dmcbufs sc_rbufs
[NRCV
]; /* recieve buffer info */
struct dmcbufs sc_xbufs
[NXMT
]; /* transmit buffer info */
struct dmcuba sc_ifuba
; /* UNIBUS resources */
int sc_ubinfo
; /* UBA mapping info for base table */
int sc_errors
[4]; /* non-fatal error counters */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc sc_errors[3]
/* command queue stuff */
struct dmc_command sc_cmdbuf
[NTOT
+3];
struct dmc_command
*sc_qhead
; /* head of command queue */
struct dmc_command
*sc_qtail
; /* tail of command queue */
struct dmc_command
*sc_qactive
; /* command in progress */
struct dmc_command
*sc_qfreeh
; /* head of list of free cmd buffers */
struct dmc_command
*sc_qfreet
; /* tail of list of free cmd buffers */
/* end command queue stuff */
/* bits kept in the sc_flag field */
#define DMC_ALLOC 01 /* unibus resources allocated */
#define DMC_BMAPPED 02 /* base table mapped */
short d_base
[128]; /* DMC base table */
/* queue manipulation macros */
/*
 * NOTE(review): the macro bodies were truncated in the mangled source
 * (each ran into the following #define); the missing (head)/(tail)
 * update statements have been restored to match the visible fragments.
 * The bodies are multiple bare statements, NOT do{}while(0)-wrapped:
 * call sites must wrap uses in braces, as the existing callers do.
 */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head)

#define	QUEUE_AT_TAIL(qp, head, tail) \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp)

#define	DEQUEUE(head, tail) \
	(head) = (head)->qp_next;\
	if ((head) == (struct dmc_command *) 0)\
		(tail) = (struct dmc_command *) 0
register struct dmcdevice
*addr
= (struct dmcdevice
*)reg
;
br
= 0; cvec
= br
; br
= cvec
;
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
if ((addr
->bsel1
& DMC_RUN
) == 0)
/* MCLR is self clearing */
addr
->bsel0
= DMC_RQI
|DMC_IEI
;
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
* Interface exists: make available by filling in network interface
* record. System will initialize the interface when it is ready
register struct uba_device
*ui
;
register struct dmc_softc
*sc
= &dmc_softc
[ui
->ui_unit
];
register struct dmc_command
*qp
;
sc
->sc_if
.if_unit
= ui
->ui_unit
;
sc
->sc_if
.if_name
= "dmc";
sc
->sc_if
.if_mtu
= DMCMTU
;
sc
->sc_if
.if_init
= dmcinit
;
sc
->sc_if
.if_output
= dmcoutput
;
sc
->sc_if
.if_ioctl
= dmcioctl
;
sc
->sc_if
.if_reset
= dmcreset
;
sc
->sc_if
.if_flags
= IFF_POINTOPOINT
;
sc
->sc_ifuba
.ifu_flags
= UBA_CANTWAIT
;
/* set up command queues */
sc
->sc_qfreeh
= sc
->sc_qfreet
=
sc
->sc_qhead
= sc
->sc_qtail
= sc
->sc_qactive
=
(struct dmc_command
*) 0;
/* set up free command buffer list */
for (qp
= &sc
->sc_cmdbuf
[0]; qp
< &sc
->sc_cmdbuf
[NTOT
+2]; qp
++ ) {
QUEUE_AT_HEAD( qp
, sc
->sc_qfreeh
, sc
->sc_qfreet
);
* Reset of interface after UNIBUS reset.
* If interface is on specified UBA, reset it's state.
register struct uba_device
*ui
;
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
if (unit
>= NDMC
|| (ui
= dmcinfo
[unit
]) == 0 || ui
->ui_alive
== 0 ||
sc
->sc_flag
= 0; /* previous unibus resources no longer valid */
* Initialization of interface; reinitialize UNIBUS usage.
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
register struct uba_device
*ui
= dmcinfo
[unit
];
register struct dmcdevice
*addr
;
register struct ifnet
*ifp
= &sc
->sc_if
;
register struct ifrw
*ifrw
;
register struct ifxmt
*ifxp
;
register struct dmcbufs
*rp
;
addr
= (struct dmcdevice
*)ui
->ui_addr
;
sin
= (struct sockaddr_in
*) &ifp
->if_addr
;
if (sin
->sin_addr
.s_addr
== 0) /* if address still unknown */
sin
= (struct sockaddr_in
*) &ifp
->if_dstaddr
;
if (sin
->sin_addr
.s_addr
== 0) /* if address still unknown */
if ((addr
->bsel1
&DMC_RUN
) == 0) {
printf("dmcinit: DMC not running\n");
ifp
->if_flags
&= ~(IFF_RUNNING
|IFF_UP
);
if ((sc
->sc_flag
&DMC_BMAPPED
) == 0) {
sc
->sc_ubinfo
= uballoc(ui
->ui_ubanum
,
(caddr_t
)&dmc_base
[unit
], sizeof (struct dmc_base
), 0);
sc
->sc_flag
|= DMC_BMAPPED
;
/* initialize UNIBUS resources */
sc
->sc_iused
= sc
->sc_oused
= 0;
if ((sc
->sc_flag
&DMC_ALLOC
) == 0) {
if (dmc_ubainit(&sc
->sc_ifuba
, ui
->ui_ubanum
, 0,
(int)btoc(DMCMTU
)) == 0) {
printf("dmc%d: can't initialize\n", unit
);
ifp
->if_flags
&= ~IFF_UP
;
sc
->sc_flag
|= DMC_ALLOC
;
/* initialize buffer pool */
ifrw
= &sc
->sc_ifuba
.ifu_r
[0];
for (rp
= &sc
->sc_rbufs
[0]; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
rp
->ubinfo
= ifrw
->ifrw_info
& 0x3ffff;
rp
->flags
= DBUF_OURS
|DBUF_RCV
;
printd("rcv: 0x%x\n",rp
->ubinfo
);
ifxp
= &sc
->sc_ifuba
.ifu_w
[0];
for (rp
= &sc
->sc_xbufs
[0]; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++) {
rp
->ubinfo
= ifxp
->x_ifrw
.ifrw_info
& 0x3ffff;
rp
->flags
= DBUF_OURS
|DBUF_XMIT
;
printd("xmit: 0x%x\n",rp
->ubinfo
);
base
= sc
->sc_ubinfo
& 0x3ffff;
printd(" base 0x%x\n", base
);
dmcload(sc
, DMC_BASEI
, base
, (base
>>2)&DMC_XMEM
);
/* specify half duplex operation, flags tell if primary */
/* or secondary station */
/* use DDMCP mode in full duplex */
dmcload(sc
, DMC_CNTLI
, 0, 0);
else if (ui
->ui_flags
== 1)
/* use MAINTENENCE mode */
dmcload(sc
, DMC_CNTLI
, 0, DMC_MAINT
);
else if (ui
->ui_flags
== 2)
/* use DDCMP half duplex as primary station */
dmcload(sc
, DMC_CNTLI
, 0, DMC_HDPLX
);
else if (ui
->ui_flags
== 3)
/* use DDCMP half duplex as secondary station */
dmcload(sc
, DMC_CNTLI
, 0, DMC_HDPLX
| DMC_SEC
);
/* queue first NRCV buffers for DMC to fill */
for (rp
= &sc
->sc_rbufs
[0]; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
dmcload(sc
, DMC_READ
, rp
->ubinfo
,
(((rp
->ubinfo
>>2)&DMC_XMEM
)|rp
->cc
));
/* enable output interrupts */
while ((addr
->bsel2
&DMC_IEO
) == 0)
ifp
->if_flags
|= IFF_UP
|IFF_RUNNING
;
* Start output on interface. Get another datagram
* to send from the interface queue and map it to
* the interface before starting output.
* Must be called at spl 5
register struct dmc_softc
*sc
= &dmc_softc
[unit
];
register struct dmcbufs
*rp
;
if ((sc
->sc_flag
& DMC_ALLOC
) == 0) {
printf("dmcstart: no unibus resources!!\n");
* Dequeue up to NXMT requests and map them to the UNIBUS.
* If no more requests, or no dmc buffers available, just return.
for (rp
= &sc
->sc_xbufs
[0]; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++ ) {
/* find an available buffer */
if ((rp
->flags
&DBUF_DMCS
) == 0){
IF_DEQUEUE(&sc
->sc_if
.if_snd
, m
);
if ((rp
->flags
&DBUF_XMIT
) == 0)
printf("dmcstart: not xmit buf\n");
rp
->flags
|= (DBUF_DMCS
);
* Have request mapped to UNIBUS for transmission
rp
->cc
= (dmcput(&sc
->sc_ifuba
, n
, m
))&DMC_CCOUNT
;
dmcload(sc
, DMC_WRITE
, rp
->ubinfo
,
rp
->cc
| ((rp
->ubinfo
>>2)&DMC_XMEM
));
* Utility routine to load the DMC device registers.
dmcload(sc
, type
, w0
, w1
)
register struct dmc_softc
*sc
;
register struct dmcdevice
*addr
;
register struct dmc_command
*qp
;
unit
= (sc
- dmc_softc
)/ sizeof (struct dmc_softc
);
addr
= (struct dmcdevice
*)dmcinfo
[unit
]->ui_addr
;
/* grab a command buffer from the free list */
if ((qp
= sc
->sc_qfreeh
) == (struct dmc_command
*)0)
panic("dmc command queue overflow");
DEQUEUE(sc
->sc_qfreeh
, sc
->sc_qfreet
);
/* fill in requested info */
qp
->qp_cmd
= (type
| DMC_RQI
);
if (sc
->sc_qactive
) { /* command in progress */
QUEUE_AT_HEAD(qp
, sc
->sc_qhead
, sc
->sc_qtail
);
QUEUE_AT_TAIL(qp
, sc
->sc_qhead
, sc
->sc_qtail
);
} else { /* command port free */
addr
->bsel0
= qp
->qp_cmd
;
* DMC interface receiver interrupt.
* Ready to accept another command,
* pull one off the command queue.
register struct dmc_softc
*sc
;
register struct dmcdevice
*addr
;
register struct dmc_command
*qp
;
addr
= (struct dmcdevice
*)dmcinfo
[unit
]->ui_addr
;
if ((qp
= sc
->sc_qactive
) == (struct dmc_command
*) 0) {
printf("dmcrint: no command\n");
while (addr
->bsel0
&DMC_RDYI
) {
addr
->sel4
= qp
->qp_ubaddr
;
addr
->bsel0
&= ~(DMC_IEI
|DMC_RQI
);
printd("load done, cmd 0x%x, ubaddr 0x%x, cc 0x%x\n",
qp
->qp_cmd
, qp
->qp_ubaddr
, qp
->qp_cc
);
/* free command buffer */
QUEUE_AT_HEAD(qp
, sc
->sc_qfreeh
, sc
->sc_qfreet
);
while (addr
->bsel0
& DMC_RDYI
) {
* Can't check for RDYO here 'cause
* this routine isn't reentrant!
/* move on to next command */
if ((sc
->sc_qactive
= sc
->sc_qhead
)==(struct dmc_command
*) 0)
/* more commands to do, start the next one */
DEQUEUE(sc
->sc_qhead
, sc
->sc_qtail
);
addr
->bsel0
= qp
->qp_cmd
;
while (n
-- && (addr
->bsel0
&DMC_RDYI
) == 0)
addr
->bsel0
|= DMC_IEI
|DMC_RQI
;
/* VMS does it twice !*$%@# */
addr
->bsel0
|= DMC_IEI
|DMC_RQI
;
* DMC interface transmitter interrupt.
* A transfer may have completed, check for errors.
* If it was a read, notify appropriate protocol.
* If it was a write, pull the next one off the queue.
register struct dmc_softc
*sc
;
register struct ifnet
*ifp
;
struct uba_device
*ui
= dmcinfo
[unit
];
register struct ifqueue
*inq
;
int arg
, pkaddr
, cmd
, len
;
register struct ifrw
*ifrw
;
register struct dmcbufs
*rp
;
addr
= (struct dmcdevice
*)ui
->ui_addr
;
cmd
= addr
->bsel2
& 0xff;
arg
= addr
->sel6
& 0xffff;
if ((cmd
&DMC_RDYO
) == 0) {
printf("dmc%d: bogus xmit intr\n", unit
);
/* reconstruct UNIBUS address of buffer returned to us */
pkaddr
= ((arg
&DMC_XMEM
)<<2)|(addr
->sel4
& 0xffff);
addr
->bsel2
&= ~DMC_RDYO
;
* Pass packet to type specific
* higher-level input routine.
/* find location in dmcuba struct */
ifrw
= &sc
->sc_ifuba
.ifu_r
[0];
for (; rp
< &sc
->sc_rbufs
[NRCV
]; rp
++) {
if (rp
->ubinfo
== pkaddr
)
printf("bad rcv pkt addr 0x%x len 0x%x\n", pkaddr
, len
);
if ((rp
->flags
&DBUF_DMCS
) == 0) {
printf("dmcxint: done unalloc rbuf\n");
switch (ifp
->if_addr
.sa_family
) {
printf("dmc%d: unknown address type %d\n", unit
,
m
= dmc_get(&sc
->sc_ifuba
, ifrw
, len
, 0);
if (m
== (struct mbuf
*)0)
arg
= ifrw
->ifrw_info
& 0x3ffff;
dmcload(sc
, DMC_READ
, arg
, ((arg
>> 2) & DMC_XMEM
) | DMCMTU
);
* A write has completed, start another
* transfer if there is more data to send.
printd("OUX pkaddr 0x%x\n",pkaddr
);
/* find associated dmcbuf structure */
for (; rp
< &sc
->sc_xbufs
[NXMT
]; rp
++) {
if (rp
->ubinfo
== pkaddr
)
printf("dmc%d: bad packet address 0x%x\n",
if ((rp
->flags
&DBUF_DMCS
) == 0)
printf("dmc returned unallocated packet 0x%x\n",
rp
->flags
&= ~(DBUF_DMCS
);
printf("dmc%d: fatal error, flags=%b\n",
ifp
->if_flags
&= ~(IFF_RUNNING
|IFF_UP
);
/* master clear device */
for (i
= 100000; i
&& (addr
->bsel1
& DMC_RUN
) == 0; i
--)
/* ACCUMULATE STATISTICS */
if((sc
->sc_nobuf
++ % DMC_RPNBFS
) != 0)
if((sc
->sc_disc
++ % DMC_RPDSC
) != 0)
if((sc
->sc_timeo
++ % DMC_RPTMO
) != 0)
if((sc
->sc_datck
++ % DMC_RPDCK
) != 0)
printf("dmc%d: soft error, flags=%b\n",
printf("dmc%d: bad control %o\n", unit
, cmd
);
* Just send the data, header was supplied by
* upper level protocol routines.
register struct ifnet
*ifp
;
if (dst
->sa_family
!= ifp
->if_addr
.sa_family
) {
printf("dmc%d: af%d not supported\n", ifp
->if_unit
,
if (IF_QFULL(&ifp
->if_snd
)) {
IF_ENQUEUE(&ifp
->if_snd
, m
);
* Process an ioctl request.
register struct ifnet
*ifp
;
struct ifreq
*ifr
= (struct ifreq
*)data
;
int s
= splimp(), error
= 0;
if (ifp
->if_flags
& IFF_RUNNING
)
if_rtinit(ifp
, -1); /* delete previous route */
sin
= (struct sockaddr_in
*)&ifr
->ifr_addr
;
ifp
->if_addr
= *(struct sockaddr
*)sin
;
ifp
->if_net
= in_netof(sin
->sin_addr
);
/* set up routing table entry */
if ((ifp
->if_flags
& IFF_ROUTE
) == 0) {
rtinit(&ifp
->if_dstaddr
, &ifp
->if_addr
, RTF_HOST
|RTF_UP
);
ifp
->if_flags
|= IFF_ROUTE
;
ifp
->if_dstaddr
= ifr
->ifr_dstaddr
;
if ((ifp
->if_flags
& IFF_RUNNING
) == 0)
* Routines supporting UNIBUS network interfaces.
* Init UNIBUS for interface on uban whose headers of size hlen are to
* end on a page boundary. We allocate a UNIBUS map register for the page
* with the header, and nmr more UNIBUS map registers for i/o on the adapter,
* doing this for each receive and transmit buffer. We also
* allocate page frames in the mbuffer pool for these pages.
dmc_ubainit(ifu
, uban
, hlen
, nmr
)
register struct dmcuba
*ifu
;
register struct ifrw
*ifrw
;
register struct ifxmt
*ifxp
;
ncl
= clrnd(nmr
+ CLSIZE
) / CLSIZE
;
if (ifu
->ifu_r
[0].ifrw_addr
) {
* If the first read buffer has a non-zero
* address, it means we have already allocated core
cp
= ifu
->ifu_r
[0].ifrw_addr
- (CLBYTES
- hlen
);
cp
= m_clalloc(NTOT
* ncl
, MPG_SPACE
);
ifu
->ifu_uba
= uba_hd
[uban
].uh_uba
;
dp
= cp
+ CLBYTES
- hlen
;
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++) {
for (ifxp
= ifu
->ifu_w
; ifxp
< &ifu
->ifu_w
[NXMT
]; ifxp
++) {
ifxp
->x_ifrw
.ifrw_addr
= dp
;
/* allocate for receive ring */
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++) {
if (dmc_ubaalloc(ifu
, ifrw
, nmr
) == 0) {
for (rw
= ifu
->ifu_r
; rw
< ifrw
; rw
++)
ubarelse(ifu
->ifu_uban
, &rw
->ifrw_info
);
/* and now transmit ring */
for (ifxp
= ifu
->ifu_w
; ifxp
< &ifu
->ifu_w
[NXMT
]; ifxp
++) {
if (dmc_ubaalloc(ifu
, ifrw
, nmr
) == 0) {
for (xp
= ifu
->ifu_w
; xp
< ifxp
; xp
++)
ubarelse(ifu
->ifu_uban
, &xp
->x_ifrw
.ifrw_info
);
for (ifrw
= ifu
->ifu_r
; ifrw
< &ifu
->ifu_r
[NRCV
]; ifrw
++)
ubarelse(ifu
->ifu_uban
, &ifrw
->ifrw_info
);
for (i
= 0; i
< nmr
; i
++)
ifxp
->x_map
[i
] = ifrw
->ifrw_mr
[i
];
m_pgfree(cp
, NTOT
* ncl
);
ifu
->ifu_r
[0].ifrw_addr
= 0;
* Setup either a ifrw structure by allocating UNIBUS map registers,
* possibly a buffered data path, and initializing the fields of
* the ifrw structure to minimize run-time overhead.
dmc_ubaalloc(ifu
, ifrw
, nmr
)
register struct ifrw
*ifrw
;
uballoc(ifu
->ifu_uban
, ifrw
->ifrw_addr
, nmr
*NBPG
+ ifu
->ifu_hlen
,
ifrw
->ifrw_bdp
= UBAI_BDP(info
);
ifrw
->ifrw_proto
= UBAMR_MRV
| (UBAI_BDP(info
) << UBAMR_DPSHIFT
);
ifrw
->ifrw_mr
= &ifu
->ifu_uba
->uba_map
[UBAI_MR(info
) + 1];
* Pull read data off a interface.
* Len is length of data, with local net header stripped.
* Off is non-zero if a trailer protocol was used, and
* gives the offset of the trailer information.
* We copy the trailer information and then all the normal
* data into mbufs. When full cluster sized units are present
* on the interface on cluster boundaries we can get them more
* easily by remapping, and take advantage of this here.
dmc_get(ifu
, ifrw
, totlen
, off0
)
register struct dmcuba
*ifu
;
register struct ifrw
*ifrw
;
struct mbuf
*top
, **mp
, *m
;
register caddr_t cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
;
MGET(m
, M_DONTWAIT
, MT_DATA
);
cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
+ off
;
len
= m
->m_len
= CLBYTES
;
m
->m_off
= (int)p
- (int)m
;
* Switch pages mapped to UNIBUS with new page p,
* as quick form of copy. Remap UNIBUS and invalidate.
cpte
= &Mbmap
[mtocl(cp
)*CLSIZE
];
ppte
= &Mbmap
[mtocl(p
)*CLSIZE
];
x
= btop(cp
- ifrw
->ifrw_addr
);
ip
= (int *)&ifrw
->ifrw_mr
[x
];
for (i
= 0; i
< CLSIZE
; i
++) {
t
= *ppte
; *ppte
++ = *cpte
; *cpte
= t
;
cpte
++->pg_pfnum
|ifrw
->ifrw_proto
;
m
->m_len
= MIN(MLEN
, len
);
bcopy(cp
, mtod(m
, caddr_t
), (unsigned)m
->m_len
);
/* sort of an ALGOL-W style for statement... */
cp
= ifrw
->ifrw_addr
+ ifu
->ifu_hlen
;
* Map a chain of mbufs onto a network interface
* in preparation for an i/o operation.
* The argument chain of mbufs includes the local network
* header which is copied to be in the mapped, aligned
register struct mbuf
*mp
;
register struct ifxmt
*ifxp
;
register struct ifrw
*ifrw
;
if (claligned(cp
) && claligned(dp
) && m
->m_len
== CLBYTES
) {
struct pte
*pte
; int *ip
;
pte
= &Mbmap
[mtocl(dp
)*CLSIZE
];
x
= btop(cp
- ifrw
->ifrw_addr
);
ip
= (int *)&ifrw
->ifrw_mr
[x
];
for (i
= 0; i
< CLSIZE
; i
++)
*ip
++ = ifrw
->ifrw_proto
| pte
++->pg_pfnum
;
xswapd
|= 1 << (x
>>(CLSHIFT
-PGSHIFT
));
m
->m_next
= ifxp
->x_xtofree
;
bcopy(mtod(m
, caddr_t
), cp
, (unsigned)m
->m_len
);
* Xswapd is the set of clusters we just mapped out. Ifxp->x_xswapd
* is the set of clusters mapped out from before. We compute
* the number of clusters involved in this operation in x.
* Clusters mapped out before and involved in this operation
* should be unmapped so original pages will be accessed by the device.
cc
= cp
- ifrw
->ifrw_addr
;
x
= ((cc
- ifu
->ifu_hlen
) + CLBYTES
- 1) >> CLSHIFT
;
ifxp
->x_xswapd
&= ~xswapd
;
while (i
= ffs(ifxp
->x_xswapd
)) {
ifxp
->x_xswapd
&= ~(1<<i
);
for (t
= 0; t
< CLSIZE
; t
++) {
ifrw
->ifrw_mr
[i
] = ifxp
->x_map
[i
];
ifxp
->x_xswapd
|= xswapd
;