- * Allocate as many contiguous UBA mapping registers
- * as are necessary to do transfer of bcnt bytes
- * to/from location baddr. Wait for enough map registers.
- *
- * Bdpflg is non-zero if a "buffered data path" (BDP) is
- * to be used, else 0 -> use direct data path (DDP). Return
+ * Do transfer on device argument. The controller
+ * and uba involved are implied by the device.
+ * We queue for resource wait in the uba code if necessary.
+ * We return 1 if the transfer was started, 0 if it was not.
+ * If you call this routine with the head of the queue for a
+ * UBA, it will automatically remove the device from the UBA
+ * queue before it returns. If some other device is given
+ * as argument, it will be added to the request queue if the
+ * request cannot be started immediately. This means that
+ * passing a device which is on the queue but not at the head
+ * of the request queue is likely to be a disaster.
+ */
+ubago(ui)
+ register struct uba_dinfo *ui;
+{
+ register struct uba_minfo *um = ui->ui_mi;
+ register struct uba_hd *uh;
+ register int s, unit;
+
+ uh = &uba_hd[um->um_ubanum];
+ /* Raise interrupt priority: the UBA resource-wait queue is
+  * also manipulated from interrupt level. */
+ s = spl6();
+ /* Try to allocate map registers and a buffered data path for
+  * the first buffer on the controller's queue; UBA_CANTWAIT
+  * makes ubasetup return 0 instead of sleeping when the
+  * resources are not available. */
+ um->um_ubinfo = ubasetup(um->um_ubanum, um->um_tab.b_actf->b_actf,
+ UBA_NEEDBDP|UBA_CANTWAIT);
+ if (um->um_ubinfo == 0) {
+ /* No resources: append ui to the tail of the UBA wait
+  * queue, unless it is already at the head (the case where
+  * we were called to retry the head of the queue). */
+ if (uh->uh_actf != ui) {
+ ui->ui_forw = NULL;
+ if (uh->uh_actf == NULL)
+ uh->uh_actf = ui;
+ else
+ uh->uh_actl->ui_forw = ui;
+ uh->uh_actl = ui;
+ }
+ splx(s);
+ return (0);
+ }
+ splx(s);
+ /* Mark the drive busy for I/O accounting, if it has a
+  * dk instrumentation slot assigned (ui_dk >= 0). */
+ if (ui->ui_dk >= 0) {
+ unit = ui->ui_dk;
+ dk_busy |= 1<<unit;
+ }
+ /* If this device was at the head of the wait queue, unlink
+  * it now that its transfer is starting (see header comment
+  * in the surrounding patch). */
+ if (uh->uh_actf == ui)
+ uh->uh_actf = ui->ui_forw;
+ /* Kick off the transfer via the driver's "go" routine. */
+ (*um->um_driver->ud_dgo)(um);
+ if (ui->ui_dk >= 0) {
+ /* Count the transfer and its size for iostat; b_bcount>>6
+  * is bytes/64 — presumably the dk_wds unit is 32-word
+  * (64-byte) chunks; confirm against the dk(4) accounting
+  * conventions. */
+ dk_xfer[unit]++;
+ dk_wds[unit] += um->um_tab.b_actf->b_bcount>>6;
+ }
+ return (1);
+}
+
+/*
+ * Allocate and setup UBA map registers, and bdp's
+ * Flags says whether bdp is needed, whether the caller can't
+ * wait (e.g. if the caller is at interrupt level).