* Copyright (c) 1982, 1986 Regents of the University of California.
* Redistribution and use in source and binary forms are permitted
* provided that the above copyright notice and this paragraph are
* duplicated in all such forms and that any documentation,
* advertising materials, and other materials related to such
* distribution and use acknowledge that the software was developed
* by the University of California, Berkeley. The name of the
* University may not be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
* @(#)if_uba.c 7.10 (Berkeley) %G%
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"
* Routines supporting UNIBUS network interfaces.
* Support interfaces using only one BDP statically.
* Init UNIBUS for interface on uban whose headers of size hlen are to
* end on a page boundary. We allocate a UNIBUS map register for the page
* with the header, and nmr more UNIBUS map registers for i/o on the adapter,
* doing this once for each read and once for each write buffer. We also
* allocate page frames in the mbuffer pool for these pages.
/*
 * if_ubaminit -- set up UNIBUS mapping resources for a network
 * interface: nr receive (ifr[]) and nw transmit (ifw[]) buffers on
 * UNIBUS adapter uban, each with an hlen-byte header and nmr map
 * registers of data space.
 *
 * NOTE(review): this block is a garbled/incomplete fragment.  The
 * function braces, several local declarations (nclbytes, cp, p, off,
 * i), the malloc wait-flag argument, the allocation-failure checks,
 * and the loop/error structure have been lost in extraction.  The
 * code below is kept byte-identical to the fragment; do not attempt
 * to compile it as-is -- recover the full text from the original
 * revision before editing logic.
 */
if_ubaminit(ifu
, uban
, hlen
, nmr
, ifr
, nr
, ifw
, nw
)
register struct ifubinfo
*ifu
;
int uban
, hlen
, nmr
, nr
, nw
;
register struct ifrw
*ifr
;
register struct ifxmt
*ifw
;
/* bytes per buffer: nmr map registers rounded up to whole clusters */
nclbytes
= CLBYTES
* (clrnd(nmr
) / CLSIZE
);
cp
= ifr
[0].ifrw_addr
- off
;
/*
 * NOTE(review): malloc() call is cut off mid-argument-list here;
 * the wait flag and the cp == 0 failure check are missing from the
 * fragment -- confirm against the original source.
 */
cp
= (caddr_t
)malloc((u_long
)((nr
+ nw
) * nclbytes
), M_DEVBUF
,
/* assign each receive buffer its header-aligned virtual address */
for (i
= 0; i
< nr
; i
++) {
ifr
[i
].ifrw_addr
= p
+ off
;
/* likewise for each transmit buffer */
for (i
= 0; i
< nw
; i
++) {
ifw
[i
].ifw_addr
= p
+ off
;
ifu
->iff_uba
= uba_hd
[uban
].uh_uba
;
/* allocate UNIBUS map registers for each buffer via if_ubaalloc() */
if (if_ubaalloc(ifu
, &ifr
[i
], nmr
) == 0) {
if (if_ubaalloc(ifu
, &ifw
[i
].ifrw
, nmr
) == 0) {
/* save a pristine copy of the write map registers (see if_ubaput) */
for (i
= 0; i
< nmr
; i
++)
ifw
[nw
].ifw_wmap
[i
] = ifw
[nw
].ifw_mr
[i
];
ifw
[nw
].ifw_flags
= IFRW_W
;
/*
 * NOTE(review): these ubarelse() calls are presumably the partial-
 * failure unwind path (release already-allocated resources); the
 * surrounding loop/goto structure is missing from the fragment.
 */
ubarelse(ifu
->iff_uban
, &ifw
[nw
].ifw_info
);
ubarelse(ifu
->iff_uban
, &ifr
[nr
].ifrw_info
);
* Setup an ifrw structure by allocating UNIBUS map registers,
* possibly a buffered data path, and initializing the fields of
* the ifrw structure to minimize run-time overhead.
/*
 * if_ubaalloc -- allocate UNIBUS map registers (and possibly a
 * buffered data path) for one ifrw buffer, then precompute the
 * fields (ifrw_bdp, ifrw_proto, ifrw_mr) used on every transfer
 * so the per-packet path stays cheap.
 *
 * NOTE(review): garbled fragment -- the uballoc() call is cut off
 * mid-arguments (its flags and the info == 0 failure check are
 * missing), the declaration of `info` is lost, and the trailing
 * ternary on ifu->iff_hlen is truncated.  Code kept byte-identical;
 * recover the full text before editing logic.
 */
if_ubaalloc(ifu
, ifrw
, nmr
)
register struct ifrw
*ifrw
;
/* map nmr pages of data plus the header area onto the UNIBUS */
uballoc(ifu
->iff_uban
, ifrw
->ifrw_addr
, nmr
*NBPG
+ ifu
->iff_hlen
,
/* cache the buffered-data-path number from the allocation result */
ifrw
->ifrw_bdp
= UBAI_BDP(info
);
/* prototype map-register value: valid bit | BDP field, OR'd with a
 * page frame number at transfer time */
ifrw
->ifrw_proto
= UBAMR_MRV
| (UBAI_BDP(info
) << UBAMR_DPSHIFT
);
/* pointer to this buffer's first data map register */
ifrw
->ifrw_mr
= &ifu
->iff_uba
->uba_map
[UBAI_MR(info
) + (ifu
->iff_hlen
?
* Pull read data off an interface.
* Len is length of data, with local net header stripped.
* Off is non-zero if a trailer protocol was used, and
* gives the offset of the trailer information.
* We copy the trailer information and then all the normal
* data into mbufs. When full cluster sized units are present
* on the interface on cluster boundaries we can get them more
* easily by remapping, and take advantage of this here.
* Prepend a pointer to the interface structure,
* so that protocols can determine where incoming packets arrived.
* Note: we may be called to receive from a transmit buffer by some
* devices. In that case, we must force normal mapping of the buffer,
* so that the correct data will appear (only unibus maps are
* changed when remapping the transmit buffers).
/*
 * if_ubaget -- pull totlen bytes of received data off the interface
 * buffer ifr into an mbuf chain, handling trailer protocols (off0)
 * and prepending the receiving ifnet pointer for the protocols.
 * Cluster-aligned, cluster-sized chunks are "copied" by swapping
 * page mappings instead of bcopy.
 *
 * NOTE(review): garbled fragment -- mbuf-chain loop structure,
 * several declarations (m, len, off, x, ip, i, t, cpte, ppte, pp)
 * and the MGET failure/page-allocation paths are missing.  Code
 * kept byte-identical; recover the full text before editing logic.
 */
if_ubaget(ifu
, ifr
, totlen
, off0
, ifp
)
register struct ifrw
*ifr
;
register caddr_t cp
= ifr
->ifrw_addr
+ ifu
->iff_hlen
, pp
;
/* receiving from a transmit buffer: force normal mapping first */
if (ifr
->ifrw_flags
& IFRW_W
)
rcv_xmtbuf((struct ifxmt
*)ifr
);
MGET(m
, M_DONTWAIT
, MT_DATA
);
/* trailer protocol: data follows the header at offset off */
cp
= ifr
->ifrw_addr
+ ifu
->iff_hlen
+ off
;
* If doing the first mbuf and
* the interface pointer hasn't been put in,
* put it in a separate mbuf to preserve alignment.
m
->m_len
= MIN(len
, CLBYTES
);
* Switch pages mapped to UNIBUS with new page pp,
* as quick form of copy. Remap UNIBUS and invalidate.
x
= btop(cp
- ifr
->ifrw_addr
);
ip
= (int *)&ifr
->ifrw_mr
[x
];
/* exchange the page table entries, then rewrite the UNIBUS map
 * registers with the new page frame numbers */
for (i
= 0; i
< CLSIZE
; i
++) {
t
= *ppte
; *ppte
++ = *cpte
; *cpte
= t
;
*ip
++ = cpte
++->pg_pfnum
|ifr
->ifrw_proto
;
/* first mbuf reserves room for the ifnet pointer */
m
->m_len
= MIN(MLEN
- sizeof(ifp
), len
);
m
->m_len
= MIN(MLEN
, len
);
/* small/unaligned remainder: plain copy into the mbuf */
bcopy(cp
, mtod(m
, caddr_t
), (unsigned)m
->m_len
);
/* sort of an ALGOL-W style for statement... */
cp
= ifr
->ifrw_addr
+ ifu
->iff_hlen
;
* Prepend interface pointer to first mbuf.
*(mtod(m
, struct ifnet
**)) = ifp
;
/* restore a transmit buffer's mapping before returning */
if (ifr
->ifrw_flags
& IFRW_W
)
restor_xmtbuf((struct ifxmt
*)ifr
);
* Change the mapping on a transmit buffer so that if_ubaget may
* receive from that buffer. Copy data from any pages mapped to Unibus
* into the pages mapped to normal kernel virtual memory, so that
* they can be accessed and swapped as usual. We take advantage
* of the fact that clusters are placed on the xtofree list
* in inverse order, finding the last one.
/*
 * NOTE(review): this is the body fragment of rcv_xmtbuf() (per the
 * comment above) -- the function-name line, local declarations
 * (i, cp, m, mprev, t) and most control structure were lost in
 * extraction.  It copies data from any clusters currently swapped
 * out to the UNIBUS back into normally-mapped pages, then restores
 * the saved write map registers.  Code kept byte-identical.
 *
 * NOTE(review): ffs() is 1-based; the original decrements i before
 * using it as a cluster index -- that decrement appears to have been
 * lost from this fragment.  Confirm against the original source.
 */
register struct ifxmt
*ifw
;
while (i
= ffs((long)ifw
->ifw_xswapd
)) {
cp
= ifw
->ifw_base
+ i
* CLBYTES
;
ifw
->ifw_xswapd
&= ~(1<<i
);
mprev
= &ifw
->ifw_xtofree
;
/* clusters are placed on xtofree in inverse order; walk to the
 * last one (the match for this cluster) */
for (m
= ifw
->ifw_xtofree
; m
&& m
->m_next
; m
= m
->m_next
)
bcopy(mtod(m
, caddr_t
), cp
, CLBYTES
);
/* restore the pristine write map registers saved at init time */
for (i
= 0; i
< ifw
->ifw_nmr
; i
++)
ifw
->ifw_mr
[i
] = ifw
->ifw_wmap
[i
];
* Put a transmit buffer back together after doing an if_ubaget on it,
* which may have swapped pages.
/*
 * NOTE(review): body fragment of restor_xmtbuf() (per the comment
 * above) -- the function-name line and the declaration of `i` were
 * lost in extraction.  It re-saves the current map registers into
 * the pristine copy after if_ubaget may have swapped pages.  Code
 * kept byte-identical.
 */
register struct ifxmt
*ifw
;
for (i
= 0; i
< ifw
->ifw_nmr
; i
++)
ifw
->ifw_wmap
[i
] = ifw
->ifw_mr
[i
];
* Map a chain of mbufs onto a network interface
* in preparation for an i/o operation.
* The argument chain of mbufs includes the local network
* header which is copied to be in the mapped, aligned
/*
 * NOTE(review): body fragment of if_ubaput() (per the comment
 * above) -- maps an mbuf chain onto a transmit buffer for output.
 * Cluster-aligned full clusters are mapped out by rewriting UNIBUS
 * map registers with the mbuf page frames instead of copying; other
 * data is bcopy'd.  The function-name line, local declarations
 * (cp, dp, m, x, ip, i, pte, xswapd, cc, t) and most control
 * structure were lost in extraction.  Code kept byte-identical.
 */
register struct ifxmt
*ifw
;
register struct mbuf
*mp
;
/* full cluster, cluster-aligned at both ends: map instead of copy */
if (claligned(cp
) && claligned(dp
) &&
(m
->m_len
== CLBYTES
|| m
->m_next
== (struct mbuf
*)0)) {
x
= btop(cp
- ifw
->ifw_addr
);
ip
= (int *)&ifw
->ifw_mr
[x
];
/* point the UNIBUS map registers at the mbuf's pages */
for (i
= 0; i
< CLSIZE
; i
++)
*ip
++ = ifw
->ifw_proto
| pte
++->pg_pfnum
;
/* remember which cluster we mapped out */
xswapd
|= 1 << (x
>>(CLSHIFT
-PGSHIFT
));
/* keep the mbuf until the transmit completes (xtofree list) */
m
->m_next
= ifw
->ifw_xtofree
;
/* unaligned or partial data: plain copy into the mapped buffer */
bcopy(mtod(m
, caddr_t
), cp
, (unsigned)m
->m_len
);
* Xswapd is the set of clusters we just mapped out. Ifu->iff_xswapd
* is the set of clusters mapped out from before. We compute
* the number of clusters involved in this operation in x.
* Clusters mapped out before and involved in this operation
* should be unmapped so original pages will be accessed by the device.
x
= ((cc
- ifu
->iff_hlen
) + CLBYTES
- 1) >> CLSHIFT
;
ifw
->ifw_xswapd
&= ~xswapd
;
/*
 * NOTE(review): ffs() is 1-based; the original decrements i before
 * indexing -- that decrement appears lost from this fragment, as is
 * the CLSIZE scaling of i inside the restore loop.  Confirm against
 * the original source.
 */
while (i
= ffs((long)ifw
->ifw_xswapd
)) {
ifw
->ifw_xswapd
&= ~(1<<i
);
for (t
= 0; t
< CLSIZE
; t
++) {
ifw
->ifw_mr
[i
] = ifw
->ifw_wmap
[i
];
ifw
->ifw_xswapd
|= xswapd
;