commit ff309bfe1c
2021-10-11 18:37:13 -03:00
This commit is contained in: seta75D
14130 changed files with 3180272 additions and 0 deletions

sys/scsi/impl/Makefile

#
# @(#)Makefile 1.1 94/10/31 SMI
#
HDIR = $(DESTDIR)/usr/include/scsi/impl
CSRC =
HFILES = commands.h inquiry.h mode.h pkt_wrapper.h sense.h \
services.h status.h transport.h types.h uscsi.h
SUBDIR =
all: $(HFILES)
install:
install_h: all
rm -rf $(HDIR)
install -d -m 755 $(HDIR)
install -m 444 $(HFILES) $(HDIR)
FRC:

sys/scsi/impl/commands.h

#ident "@(#)commands.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1989, 1990 by Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_commands_h
#define _scsi_impl_commands_h
/*
* Implementation dependent (i.e., Vendor Unique) command definitions.
* This file is included by <scsi/generic/commands.h>
*/
/*
*
* Private Vendor Unique Commands
*
*/
#ifdef ACB4000
#define SCMD_TRANSLATE 0x0F
#endif /* ACB4000 */
/*
*
* Implementation dependent view of a SCSI command descriptor block
*
* (Original attribution: Kevin Sheehan, Sun Consulting)
*/
/*
* Standard SCSI control blocks definitions.
*
* These go in or out over the SCSI bus.
*
* The first 11 bits of the command block are the same for all three
* defined command groups. The first byte is an operation which consists
* of a command code component and a group code component. The first 3 bits
* of the second byte are the unit number.
*
* The group code determines the length of the rest of the command.
* Group 0 commands are 6 bytes, Group 1 are 10 bytes, and Group 5
* are 12 bytes. Groups 2-4 are Reserved. Groups 6 and 7 are Vendor
* Unique.
*
*/
/*
* At present, our standard cdb's will reserve enough space for
* use with up to Group 5 commands. This may have to change soon
* if optical disks have 20 byte or longer commands. At any rate,
* the Sun SCSI implementation has no problem handling arbitrary
* length commands; it is just more efficient to declare it as a
* certain size (to avoid runtime allocation overhead).
*/
#define CDB_SIZE CDB_GROUP5
union scsi_cdb { /* scsi command description block */
struct {
u_char cmd; /* cmd code (byte 0) */
u_char lun :3, /* lun (byte 1) */
tag :5; /* rest of byte 1 */
union { /* bytes 2 - 31 */
u_char scsi[CDB_SIZE-2];
/*
* G R O U P 0 F O R M A T (6 bytes)
*/
#define scc_cmd cdb_un.cmd
#define scc_lun cdb_un.lun
#define g0_addr2 cdb_un.tag
#define g0_addr1 cdb_un.sg.g0.addr1
#define g0_addr0 cdb_un.sg.g0.addr0
#define g0_count0 cdb_un.sg.g0.count0
#define g0_vu_1 cdb_un.sg.g0.vu_57
#define g0_vu_0 cdb_un.sg.g0.vu_56
#define g0_flag cdb_un.sg.g0.flag
#define g0_link cdb_un.sg.g0.link
/*
* defines for SCSI tape cdb.
*/
#define t_code cdb_un.tag
#define high_count cdb_un.sg.g0.addr1
#define mid_count cdb_un.sg.g0.addr0
#define low_count cdb_un.sg.g0.count0
struct scsi_g0 {
u_char addr1; /* middle part of address */
u_char addr0; /* low part of address */
u_char count0; /* usually block count */
u_char vu_57 :1; /* vendor unique (byte 5 bit 7) */
u_char vu_56 :1; /* vendor unique (byte 5 bit 6) */
u_char rsvd :4; /* reserved */
u_char flag :1; /* interrupt when done */
u_char link :1; /* another command follows */
} g0;
/*
* G R O U P 1 F O R M A T (10 byte)
*/
#define g1_reladdr cdb_un.tag
#define g1_rsvd0 cdb_un.sg.g1.rsvd1
#define g1_addr3 cdb_un.sg.g1.addr3 /* msb */
#define g1_addr2 cdb_un.sg.g1.addr2
#define g1_addr1 cdb_un.sg.g1.addr1
#define g1_addr0 cdb_un.sg.g1.addr0 /* lsb */
#define g1_count1 cdb_un.sg.g1.count1 /* msb */
#define g1_count0 cdb_un.sg.g1.count0 /* lsb */
#define g1_vu_1 cdb_un.sg.g1.vu_97
#define g1_vu_0 cdb_un.sg.g1.vu_96
#define g1_flag cdb_un.sg.g1.flag
#define g1_link cdb_un.sg.g1.link
struct scsi_g1 {
u_char addr3; /* most sig. byte of address*/
u_char addr2;
u_char addr1;
u_char addr0;
u_char rsvd1; /* reserved (byte 6) */
u_char count1; /* transfer length (msb) */
u_char count0; /* transfer length (lsb) */
u_char vu_97 :1; /* vendor unique (byte 9 bit 7) */
u_char vu_96 :1; /* vendor unique (byte 9 bit 6) */
u_char rsvd0 :4; /* reserved */
u_char flag :1; /* interrupt when done */
u_char link :1; /* another command follows */
} g1;
/*
* G R O U P 5 F O R M A T (12 byte)
*/
#define scc5_reladdr cdb_un.tag
#define scc5_addr3 cdb_un.sg.g5.addr3 /* msb */
#define scc5_addr2 cdb_un.sg.g5.addr2
#define scc5_addr1 cdb_un.sg.g5.addr1
#define scc5_addr0 cdb_un.sg.g5.addr0 /* lsb */
#define scc5_count1 cdb_un.sg.g5.count1 /* msb */
#define scc5_count0 cdb_un.sg.g5.count0 /* lsb */
#define scc5_vu_1 cdb_un.sg.g5.v1
#define scc5_vu_0 cdb_un.sg.g5.v0
#define scc5_flag cdb_un.sg.g5.flag
struct scsi_g5 {
u_char addr3; /* most sig. byte of address*/
u_char addr2;
u_char addr1;
u_char addr0;
u_char rsvd3; /* reserved */
u_char rsvd2; /* reserved */
u_char rsvd1; /* reserved */
u_char count1; /* transfer length (msb) */
u_char count0; /* transfer length (lsb) */
u_char vu_117 :1; /* vendor unique (byte 11 bit 7) */
u_char vu_116 :1; /* vendor unique (byte 11 bit 6) */
u_char rsvd0 :4; /* reserved */
u_char flag :1; /* interrupt when done */
u_char link :1; /* another command follows */
} g5;
}sg;
} cdb_un;
u_char cdb_opaque[CDB_SIZE]; /* addressed as an opaque char array */
u_long cdb_long[CDB_SIZE / sizeof (long)]; /* as a longword array */
};
/*
* Various useful Macros for SCSI commands
*/
/*
* defines for getting/setting fields within the various command groups
*/
#define GETCMD(cdb) ((cdb)->scc_cmd & 0x1F)
#define GETGROUP(cdb) (CDB_GROUPID((cdb)->scc_cmd))
#define FORMG0COUNT(cdb, cnt) (cdb)->g0_count0 = (cnt)
#define FORMG0ADDR(cdb, addr) (cdb)->g0_addr2 = (addr) >> 16;\
(cdb)->g0_addr1 = ((addr) >> 8) & 0xFF;\
(cdb)->g0_addr0 = (addr) & 0xFF
#define GETG0ADDR(cdb) (((cdb)->g0_addr2 & 0x1F) << 16) + \
((cdb)->g0_addr1 << 8) + ((cdb)->g0_addr0)
#define GETG0TAG(cdb) ((cdb)->g0_addr2)
#define FORMG0COUNT_S(cdb,cnt) (cdb)->high_count = (cnt) >> 16;\
(cdb)->mid_count = ((cnt) >> 8) & 0xFF;\
(cdb)->low_count= (cnt) & 0xFF
#define FORMG1COUNT(cdb, cnt) (cdb)->g1_count1 = ((cnt) >> 8);\
(cdb)->g1_count0 = (cnt) & 0xFF
#define FORMG1ADDR(cdb, addr) (cdb)->g1_addr3 = (addr) >> 24;\
(cdb)->g1_addr2 = ((addr) >> 16) & 0xFF;\
(cdb)->g1_addr1 = ((addr) >> 8) & 0xFF;\
(cdb)->g1_addr0 = (addr) & 0xFF
#define GETG1ADDR(cdb) ((cdb)->g1_addr3 << 24) + \
((cdb)->g1_addr2 << 16) + \
((cdb)->g1_addr1 << 8) + \
((cdb)->g1_addr0)
#define GETG1TAG(cdb) (cdb)->g1_reladdr
#define FORMG5COUNT(cdb, cnt) (cdb)->scc5_count1 = ((cnt) >> 8);\
(cdb)->scc5_count0 = (cnt) & 0xFF
#define FORMG5ADDR(cdb, addr) (cdb)->scc5_addr3 = (addr) >> 24;\
(cdb)->scc5_addr2 = ((addr) >> 16) & 0xFF;\
(cdb)->scc5_addr1 = ((addr) >> 8) & 0xFF;\
(cdb)->scc5_addr0 = (addr) & 0xFF
#define GETG5ADDR(cdb) ((cdb)->scc5_addr3 << 24) + \
((cdb)->scc5_addr2 << 16) + \
((cdb)->scc5_addr1 << 8) + \
((cdb)->scc5_addr0)
#define GETG5TAG(cdb) (cdb)->scc5_reladdr
/*
* Shorthand macros for forming commands
*/
#define MAKECOM_COMMON(pktp, devp, flag, cmd) \
(pktp)->pkt_address = (devp)->sd_address, \
(pktp)->pkt_flags = (flag), \
((union scsi_cdb *)(pktp)->pkt_cdbp)->scc_cmd = (cmd), \
((union scsi_cdb *)(pktp)->pkt_cdbp)->scc_lun = \
(pktp)->pkt_address.a_lun
#define MAKECOM_G0(pktp, devp, flag, cmd, addr, cnt) \
MAKECOM_COMMON((pktp), (devp), (flag), (cmd)), \
FORMG0ADDR(((union scsi_cdb *)(pktp)->pkt_cdbp), (addr)), \
FORMG0COUNT(((union scsi_cdb *)(pktp)->pkt_cdbp), (cnt))
#define MAKECOM_G0_S(pktp, devp, flag, cmd, cnt, fixbit) \
MAKECOM_COMMON((pktp), (devp), (flag), (cmd)), \
FORMG0COUNT_S(((union scsi_cdb *)(pktp)->pkt_cdbp), (cnt)), \
((union scsi_cdb *)(pktp)->pkt_cdbp)->t_code = (fixbit)
#define MAKECOM_G1(pktp, devp, flag, cmd, addr, cnt) \
MAKECOM_COMMON((pktp), (devp), (flag), (cmd)), \
FORMG1ADDR(((union scsi_cdb *)(pktp)->pkt_cdbp), (addr)), \
FORMG1COUNT(((union scsi_cdb *)(pktp)->pkt_cdbp), (cnt))
#define MAKECOM_G5(pktp, devp, flag, cmd, addr, cnt) \
MAKECOM_COMMON((pktp), (devp), (flag), (cmd)), \
FORMG5ADDR(((union scsi_cdb *)(pktp)->pkt_cdbp), (addr)), \
FORMG5COUNT(((union scsi_cdb *)(pktp)->pkt_cdbp), (cnt))
#ifdef KERNEL
extern void makecom_g0(), makecom_g0_s(), makecom_g1(), makecom_g5();
#endif
#endif /* !_scsi_impl_commands_h */

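A usage sketch for the MAKECOM_* macros above (outside the header itself): it assumes a target driver that already holds a scsi_pkt and scsi_device, includes <scsi/scsi.h> as the .c files in this commit do, and uses the standard Group 1 READ opcode 0x28 purely for illustration.

/*
 * Hypothetical fragment: build a Group 1 READ of 'nblk' blocks at 'blkno'.
 * MAKECOM_G1 fills the cdb behind pktp->pkt_cdbp via FORMG1ADDR/FORMG1COUNT
 * and copies the lun from the packet's address.
 */
#include <scsi/scsi.h>

void
build_read_g1(pktp, devp, blkno, nblk)
	struct scsi_pkt *pktp;
	struct scsi_device *devp;
	int blkno, nblk;
{
	/* 0x28 is the standard Group 1 READ opcode, shown only as an example */
	MAKECOM_G1(pktp, devp, 0, 0x28, blkno, nblk);
}
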
sys/scsi/impl/inquiry.h

#ident "@(#)inquiry.h 1.1"
/* @(#)inquiry.h 1.1 94/10/31 SMI */
/*
* Copyright (c) 1990 Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_inquiry_h
#define _scsi_impl_inquiry_h
/*
* Implementation inquiry data that is not within
* the scope of any released SCSI standard.
*/
/*
* Minimum inquiry data length (includes up through RDF field)
*/
#define SUN_MIN_INQLEN 4
/*
* Inquiry data size definition
*/
#define SUN_INQSIZE (sizeof (struct scsi_inquiry))
#endif /* _scsi_impl_inquiry_h */

sys/scsi/impl/mode.h

#ident "@(#)mode.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1990, 1991 by Sun Microsystems Inc.
*/
#ifndef _scsi_impl_mode_h
#define _scsi_impl_mode_h
/*
* Defines and Structures for SCSI Mode Sense/Select data
*
* Implementation Specific variations
*/
/*
* Variations to Direct Access device pages
*/
/*
* Page 1: CCS error recovery page is a little different than SCSI-2
*/
#define PAGELENGTH_DAD_MODE_ERR_RECOV_CCS 0x06
struct mode_err_recov_ccs {
struct mode_page mode_page; /* common mode page header */
u_char awre : 1, /* auto write realloc enabled */
arre : 1, /* auto read realloc enabled */
tb : 1, /* transfer block */
rc : 1, /* read continuous */
eec : 1, /* enable early correction */
per : 1, /* post error */
dte : 1, /* disable transfer on error */
dcr : 1; /* disable correction */
u_char retry_count;
u_char correction_span;
u_char head_offset_count;
u_char strobe_offset_count;
u_char recovery_time_limit;
};
/*
* Page 2: CCS Disconnect/Reconnect Page
*/
struct mode_disco_reco_ccs {
struct mode_page mode_page; /* common mode page header */
u_char buffer_full_ratio; /* write, how full before reconnect? */
u_char buffer_empty_ratio; /* read, how full before reconnect? */
u_short bus_inactivity_limit; /* how much bus quiet time for BSY- */
u_short disconect_time_limit; /* min to remain disconnected */
u_short connect_time_limit; /* min to remain connected */
u_char reserved[2];
};
/*
* Page 3: CCS Direct Access Device Format Parameters
*
* The 0x8 bit in the Drive Type byte is used in CCS
* as an INHIBIT SAVE bit. This bit is not in SCSI-2.
*/
#define _reserved_ins ins
/*
* Page 0x4 - CCS Rigid Disk Drive Geometry Parameters
*/
struct mode_geometry_ccs {
struct mode_page mode_page; /* common mode page header */
u_char cyl_ub; /* number of cylinders */
u_char cyl_mb;
u_char cyl_lb;
u_char heads; /* number of heads */
u_char precomp_cyl_ub; /* cylinder to start precomp */
u_char precomp_cyl_mb;
u_char precomp_cyl_lb;
u_char current_cyl_ub; /* cyl to start reduced current */
u_char current_cyl_mb;
u_char current_cyl_lb;
u_short step_rate; /* drive step rate */
u_char landing_cyl_ub; /* landing zone cylinder */
u_char landing_cyl_mb;
u_char landing_cyl_lb;
u_char reserved[3];
};
/*
* Page 0x38 - This is the CCS Cache Page
*/
#define DAD_MODE_CACHE_CCS 0x38
struct mode_cache_ccs {
struct mode_page mode_page; /* common mode page header */
u_char mode; /* Cache control and size */
u_char threshold; /* Prefetch threshold */
u_char max_prefetch; /* Max. prefetch */
u_char max_multiplier; /* Max. prefetch multiplier */
u_char min_prefetch; /* Min. prefetch */
u_char min_multiplier; /* Min. prefetch multiplier */
u_char rsvd2[8];
};
/*
* Emulex MD21 Unique Mode Select/Sense structure.
* This is apparently not used, although the MD21
* documentation refers to it.
*
* The medium_type in the mode header must be 0x80
* to indicate a vendor unique format. There is then
* a standard block descriptor page, which must be
* zeros (although the block descriptor length is set
* appropriately in the mode header).
*
* After this stuff, comes the vendor unique ESDI
* format parameters for the MD21.
*
* Notes:
*
* 1) The logical number of sectors/track should be the
* number of physical sectors/track less the number spare
* sectors/track.
*
* 2) The logical number of cylinders should be the
* number of physical cylinders less three (3) reserved
* for use by the drive, and less any alternate cylinders
* allocated.
*
* 3) head skew- see MD21 manual.
*/
struct emulex_format_params {
u_char alt_cyl; /* number of alternate cylinders */
u_char nheads : 4, /* number of heads */
ssz : 1, /* sector size. 1 == 256 bps, 0 == 512 bps */
sst : 2, /* spare sectors per track */
: 1;
u_char nsect; /* logical sectors/track */
u_char ncyl_hi; /* logical number of cylinders, msb */
u_char ncyl_lo; /* logical number of cylinders, lsb */
u_char head_skew; /* head skew */
u_char reserved[3];
};
#endif /* !_scsi_impl_mode_h */

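A sketch of the geometry arithmetic described in the MD21 notes above (outside the header itself); only struct emulex_format_params comes from this file, and the physical-geometry parameter names are hypothetical inputs.

/*
 * Hypothetical helper: derive the logical MD21 parameters from physical
 * drive geometry per notes 1) and 2) above (subtract the 3 cylinders
 * reserved by the drive plus any alternates; subtract spares per track).
 */
void
md21_fill_format(fp, phys_cyl, phys_heads, phys_spt, spares_per_trk, alt_cyl)
	struct emulex_format_params *fp;
	int phys_cyl, phys_heads, phys_spt, spares_per_trk, alt_cyl;
{
	int log_cyl = phys_cyl - 3 - alt_cyl;
	int log_spt = phys_spt - spares_per_trk;

	fp->alt_cyl = alt_cyl;
	fp->nheads = phys_heads;
	fp->sst = spares_per_trk;	/* 2-bit field: 0..3 spares/track */
	fp->ssz = 0;			/* 512 bytes per sector */
	fp->nsect = log_spt;
	fp->ncyl_hi = (log_cyl >> 8) & 0xFF;
	fp->ncyl_lo = log_cyl & 0xFF;
	fp->head_skew = 0;		/* see MD21 manual */
}
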
sys/scsi/impl/pkt_wrapper.h

/* @(#)pkt_wrapper.h 1.1 94/10/31 SMI */
#ifndef _scsi_impl_pktwrapper_h
#define _scsi_impl_pktwrapper_h
/*
* Copyright (c) 1989 Sun Microsystems, Inc.
*/
/*
* Implementation specific SCSI definitions for wrapping around
* the generic scsi command packet.
*/
#include <scsi/scsi_types.h>
struct dataseg {
u_long d_base; /* base of current segment */
u_long d_count; /* length of current segment */
struct dataseg *d_next; /* pointer to next segment */
};
struct scsi_cmd {
struct scsi_pkt cmd_pkt; /* the generic packet itself */
caddr_t cmd_cdbp; /* active command pointer */
caddr_t cmd_scbp; /* active status pointer */
u_long cmd_data; /* active data 'pointer' */
u_long cmd_saved_data; /* saved data 'pointer' */
struct dataseg cmd_mapseg; /* mapped in data segment */
struct dataseg cmd_subseg; /* initial data sub-segment */
struct dataseg *cmd_cursubseg; /* current data sub-segment */
u_long cmd_timeout; /* command timeout */
u_short cmd_flags; /* private flags */
u_char cmd_scblen; /* length of scb */
u_char cmd_scb[STATUS_SIZE]; /* 3 byte scb */
u_char cmd_cdblen; /* length of cdb */
u_char cmd_cdb[CDB_SIZE]; /* 'generic' Sun cdb */
};
/*
* compatibility defines- if only one data segment is mapped in (default case)
* this will suffice.
*/
#define cmd_mapping cmd_mapseg.d_base
#define cmd_dmacount cmd_mapseg.d_count
/*
* A note about dataseg structures:
*
* The cmd_mapseg is the beginning of a list of mapped in memory
* (mapped in by dma allocation routines). This mapping is what
* is appropriate for I/O devices on a given architecture. For
* most current Sun platforms, this will be a single mapping
* established in DVMA space.
*
* The cmd_subseg and cmd_cursubseg tags are for use by the host
* adapter to keep track of data transfers to and from the memory
* mappings described in cmd_mapseg. If a SCSI target uses the
* MODIFY DATA POINTER message, or some combination of the SAVE
* DATA POINTER/RESTORE POINTERS messages, this mechanism allows
* the host adapter to keep track of whether data was retransmitted
* by the target, or whether the host adapter picked up or sent only
* bits and pieces of the data.
*
* A note about the data 'pointers':
*
* In this implementation, these pointers are actually
* DVMA mapping cookies- that is, they specify the mapping
* register and offset from the beginning of DVMA. On many
* baseline Sun machines DVMA hardware refers to specific set
* of mapping registers that translate Multibus or VME dma
* requests (in the range of 0 to 256kb or 0 to 1 mb resp.)
* into physical OBMEM references. On those machines, the
* SunOS kernel maintains alias addressing of the same
* physical locations by wiring down the same amount of space
* that can then be referenced through the kernel addresses
* DVMA[0..mmu_ptob(ndvmapages)]. On some other Sun machines,
* the actual hardware implementation may be different (i.e.,
* there is no actual specific extra hardware- the dma
* requests may actually use the same MMU mapping as the
* kernel), but for all Sun machines to date, there is
* always some meaning to DVMA. Each specific host adapter,
* if it assumes dma information (i.e., data 'pointers'
* here) is a DVMA mapping cookie, can provide the appropriate
* and specific translation to that required by its own
* dma engine.
*
*
* A note about the cmd_cdb && cmd_scb structures:
*
* If the command allocation requested exceeds the size of cmd_cdb,
* the cdb will be allocated outside this structure (via kmem_alloc).
* The same applies to cmd_scb.
*
*/
/*
* These are the defined flags for this structure
*/
#define CFLAG_DMAVALID 0x01 /* dma mapping valid */
#define CFLAG_DMASEND 0x02 /* data is going 'out' */
#define CFLAG_DMAKEEP 0x04 /* don't unmap on dma free */
#define CFLAG_CMDDISC 0x10 /* cmd currently disconnected */
#define CFLAG_WATCH 0x20 /* watchdog time for this command */
#define CFLAG_CDBEXTERN 0x40 /* cdb kmem_alloc'd */
#define CFLAG_SCBEXTERN 0x80 /* scb kmem_alloc'd */
#define CFLAG_CMDPROXY 0x100 /*
* cmd is a 'proxy' command -
* i.e., run by the host adapter
* for specific message reasons
* (e.g., ABORT or RESET operations)
*/
#define CFLAG_NEEDSEG 0x1000 /*
* Need a new dma segment on next
* data phase.
*/
#ifdef KERNEL
extern int scsi_chkdma();
#endif
#endif /* _scsi_impl_pktwrapper_h */

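A small sketch (outside the header itself) of how a host adapter might read the wrapper fields and flag bits defined above; the printf is only there to keep the fragment self-contained.

/*
 * Hypothetical fragment: report the active dma mapping for a command,
 * using the compatibility defines for the single-segment case.
 */
#include <scsi/scsi.h>

void
ha_show_dma(sp)
	register struct scsi_cmd *sp;
{
	if ((sp->cmd_flags & CFLAG_DMAVALID) == 0)
		return;
	printf("dma %s: base 0x%x, count 0x%x\n",
	    (sp->cmd_flags & CFLAG_DMASEND) ? "out" : "in",
	    sp->cmd_mapping, sp->cmd_dmacount);
}
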
sys/scsi/impl/scsi_capabilities.c

#ident "@(#)scsi_capabilities.c 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 by Sun Microsystems, Inc.
*/
/*
*
* Generic Capabilities Routines
*
*/
#include <scsi/scsi.h>
int
scsi_ifgetcap(ap, cap, whom)
struct scsi_address *ap;
char *cap;
int whom;
{
register struct scsi_transport *tranp;
tranp = (struct scsi_transport *) ap->a_cookie;
return (*tranp->tran_getcap)(ap, cap, whom);
}
int
scsi_ifsetcap(ap, cap, value, whom)
struct scsi_address *ap;
char *cap;
int value, whom;
{
register struct scsi_transport *tranp;
tranp = (struct scsi_transport *) ap->a_cookie;
return (*tranp->tran_setcap)(ap, cap, value, whom);
}

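A sketch (not part of the file above) of how a target driver might use these wrappers together with scsi_capstrings and the SCSI_CAP_* indices from <scsi/impl/services.h>; the 'whom' argument is passed as 0 and a positive return is treated as "supported" purely for illustration.

#include <scsi/scsi.h>

/* Hypothetical fragment: query and enable disconnect/reconnect support. */
void
tgt_probe_caps(devp)
	struct scsi_device *devp;
{
	register char *cap = scsi_capstrings[SCSI_CAP_DISCONNECT];

	/* assume a positive return means the capability is supported */
	if (scsi_ifgetcap(&devp->sd_address, cap, 0) > 0) {
		(void) scsi_ifsetcap(&devp->sd_address, cap, 1, 0);
	}
}
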
sys/scsi/impl/scsi_control.c

#ifndef lint
static char sccsid[] = "@(#)scsi_control.c 1.1 94/10/31 SMI";
#endif lint
/*
* Copyright (c) 1989 Sun Microsystems, Inc.
*/
/****************************************************************
* *
* Generic Abort and Reset Routines *
* *
****************************************************************/
#include <scsi/scsi.h>
int
scsi_abort(ap,pkt)
struct scsi_address *ap;
struct scsi_pkt *pkt;
{
struct scsi_transport *tranp;
tranp = (struct scsi_transport *) ap->a_cookie;
return (*tranp->tran_abort)(ap,pkt);
}
int
scsi_reset(ap,level)
struct scsi_address *ap;
int level;
{
struct scsi_transport *tranp;
tranp = (struct scsi_transport *) ap->a_cookie;
return (*tranp->tran_reset)(ap,level);
}

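A heavily hedged sketch (not part of the file above) of how a target driver might escalate from abort to reset through these wrappers; the return-value convention and the reset level value are assumptions.

#include <scsi/scsi.h>

/* Hypothetical fragment: try to abort a timed-out packet, else reset. */
void
tgt_recover(ap, pkt)
	struct scsi_address *ap;
	struct scsi_pkt *pkt;
{
	/* assume a zero return means the host adapter could not abort it */
	if (scsi_abort(ap, pkt) == 0) {
		/* legal 'level' values are defined elsewhere; 0 is illustrative */
		(void) scsi_reset(ap, 0);
	}
}
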
sys/scsi/impl/scsi_data.c

#ident "@(#)scsi_data.c 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 by Sun Microsystems, Inc.
*/
/*
* Global SCSI data
*/
#include <scsi/scsi.h>
char *sense_keys[NUM_SENSE_KEYS + NUM_IMPL_SENSE_KEYS] = {
"No Additional Sense", /* 0x00 */
"Soft Error", /* 0x01 */
"Not Ready", /* 0x02 */
"Media Error", /* 0x03 */
"Hardware Error", /* 0x04 */
"Illegal Request", /* 0x05 */
"Unit Attention", /* 0x06 */
"Write Protected", /* 0x07 */
"Blank Check", /* 0x08 */
"Vendor Unique", /* 0x09 */
"Copy Aborted", /* 0x0a */
"Aborted Command", /* 0x0b */
"Equal Error", /* 0x0c */
"Volume Overflow", /* 0x0d */
"Miscompare Error", /* 0x0e */
"Reserved", /* 0x0f */
"fatal", /* 0x10 */
"timeout", /* 0x11 */
"EOF", /* 0x12 */
"EOT", /* 0x13 */
"length error", /* 0x14 */
"BOT", /* 0x15 */
"wrong tape media" /* 0x16 */
};
char *state_bits = "\20\05STS\04XFER\03CMD\02SEL\01ARB";
/*
* Common Capability Strings
* See <scsi/impl/services.h> for
* definitions of positions in this
* array
*/
char *scsi_capstrings[] = {
"dma_max",
"msg_out",
"disconnect",
"synchronous",
"wide_xfer",
"parity",
"initiator-id",
"untagged-qing",
"tagged-qing",
0
};

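A sketch (not part of the file above) of indexing the sense_keys table; NUM_SENSE_KEYS is assumed to come from the generic sense header, as it does for the array declaration itself.

#include <scsi/scsi.h>

/* Hypothetical helper: name a standard or Sun pseudo sense key. */
char *
sense_keyname(key)
	u_char key;
{
	if (key < NUM_SENSE_KEYS + NUM_IMPL_SENSE_KEYS)
		return (sense_keys[key]);
	return ("<illegal sense key>");
}
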
sys/scsi/impl/scsi_resource.c

#ident "@(#)scsi_resource.c 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 Sun Microsystems, Inc.
*/
#define DPRINTF if (scsi_options & SCSI_DEBUG_LIB) printf
/*
* Generic Resource Allocation Routines
*/
#include <scsi/scsi.h>
struct scsi_pkt *
scsi_resalloc(ap, cmdlen, statuslen, dmatoken, callback)
struct scsi_address *ap;
int cmdlen, statuslen;
opaque_t dmatoken;
int (*callback)();
{
register struct scsi_pkt *pktp;
register struct scsi_transport *tranp;
/*
* The first part of the address points to
* an array of transport function points
*/
tranp = (struct scsi_transport *) ap->a_cookie;
pktp = (*tranp->tran_pktalloc)(ap, cmdlen, statuslen, callback);
if (pktp == (struct scsi_pkt *) 0) {
if (callback == SLEEP_FUNC) {
panic("scsi_resalloc: No packet after sleep");
/*NOTREACHED*/
}
} else if (dmatoken != (opaque_t) 0) {
if ((*tranp->tran_dmaget)(pktp, dmatoken, callback) == NULL) {
if (callback == SLEEP_FUNC) {
panic("scsi_resalloc: No dma after sleep");
/*NOTREACHED*/
}
/*
* if we didn't get dma resources in this function,
* free up the packet resources.
*/
(*tranp->tran_pktfree)(pktp);
pktp = (struct scsi_pkt *) 0;
}
}
return (pktp);
}
struct scsi_pkt *
scsi_pktalloc(ap, cmdlen, statuslen, callback)
struct scsi_address *ap;
int cmdlen, statuslen;
int (*callback)();
{
struct scsi_transport *tranp = (struct scsi_transport *) ap->a_cookie;
return ((*tranp->tran_pktalloc)(ap, cmdlen, statuslen, callback));
}
struct scsi_pkt *
scsi_dmaget(pkt, dmatoken, callback)
struct scsi_pkt *pkt;
opaque_t dmatoken;
int (*callback)();
{
struct scsi_transport *tranp;
if (dmatoken == (opaque_t) NULL) {
return ((struct scsi_pkt *) NULL);
}
tranp = (struct scsi_transport *) pkt->pkt_address.a_cookie;
return ((*tranp->tran_dmaget)(pkt, dmatoken, callback));
}
/*
* Generic Resource Deallocation Routines
*/
void
scsi_dmafree(pkt)
struct scsi_pkt *pkt;
{
struct scsi_transport *tranp =
(struct scsi_transport *) pkt->pkt_address.a_cookie;
(*tranp->tran_dmafree)(pkt);
}
void
scsi_resfree(pkt)
struct scsi_pkt *pkt;
{
struct scsi_transport *tranp =
(struct scsi_transport *) pkt->pkt_address.a_cookie;
/*
* Free DMA resources if any need to be freed.
*/
(*tranp->tran_dmafree)(pkt);
/*
* free packet.
*/
(*tranp->tran_pktfree)(pkt);
}
/*
* Standard Resource Allocation/Deallocation Routines
*/
/*
* When Host Adapters don't want to supply their own resource allocation
* routines, and they can live with certain assumptions about DVMA,
* these routines are stuffed into their scsi_transport structures
* which they then export to the library.
*/
/*
* Local resource management data && defines
*/
#if defined(sun4c) || defined(sun4m)
#define DMA dvmamap
#else /* defined(sun4c) || defined(sun4m) */
#define DMA mb_hd.mh_map
#endif /* defined(sun4c) || defined(sun4m) */
int scsi_ncmds, scsi_spl;
static int sfield();
static int scsi_cmdwake = 0;
static struct scsi_cmd *scsibase;
/*
* This code shamelessly stolen from mb_machdep.c.
* Full attribution and credit to John Pope.
*
* Drivers queue function pointers here when they have to wait
* for resources. Subsequent calls from the same driver that are
* forced to wait need not queue up again, as the function pointed
* to will run the driver's internal queues until done or space runs
* out again.
*
* The difficulty is that we may block on two different things here:
* packet allocation itself, and DVMA mapping resources associated with
* the packet. If we cannot get a packet, no problem. If we can get
* or have already gotten a packet, but cannot get DVMA mapping resources,
* things get a bit sticky. The DVMA mapping callback routine expects
* us to return DVMA_RUNOUT if DVMA allocation fails again- but *our*
* callback to the target driver may return failure not due to DVMA
* allocation failure (again) but packet allocation failure instead.
* In either case, we assume that *we* have appropriately re-queued
* the caller, but we have to know what kind of allocation failure
* it was in order to appropriately notify the sentinel in the DVMA
* map allocation callback code.
*
* In other words, the SCSA specification screwed this up. Oh well.
*
* The magic number for SCQLEN was picked because:
*
* a) That seems a reasonable limit for the number of separate and
* distinct target drivers that can latch up waiting for resources.
*
* b) it is a power of two, to make the cycle easier.
*
*/
#define SCQLEN 0x8
#define SCQLENMASK 0x7
struct scq {
u_int qlen;
u_int qstore;
u_int qretrv;
u_int ncalls;
u_int incallback;
func_t funcp[SCQLEN];
};
static struct scq scpq, scdq;
static void scq_store(); /* store element */
static func_t scq_retrieve(); /* retrieve element */
/*
* resource initializer
*/
void
scsi_rinit()
{
if (scsibase != (struct scsi_cmd *) 0) {
return;
}
/*
* start with a minimum of twice the per_dev requirement
*/
scsi_addcmds(scsi_ncmds_per_dev << 1);
if (scsibase == (struct scsi_cmd *) 0) {
panic("No space for scsi command structures");
/*NOTREACHED*/
}
scpq.qretrv = scdq.qretrv = SCQLEN-1;
scpq.incallback = scdq.incallback = 0;
}
void
scsi_addcmds(ncmds)
int ncmds;
{
register s, i;
register struct scsi_cmd *sp;
sp = (struct scsi_cmd *)
kmem_zalloc((u_int) sizeof (struct scsi_cmd) * ncmds);
if (sp == (struct scsi_cmd *) 0) {
return;
}
scsi_ncmds += ncmds;
s = splr(scsi_spl);
for (i = 0; i < ncmds-1; i++)
sp[i].cmd_pkt.pkt_ha_private = (opaque_t) &sp[i+1];
sp[ncmds-1].cmd_pkt.pkt_ha_private = (opaque_t) scsibase;
scsibase = sp;
(void) splx(s);
}
/*
* pktalloc
*/
struct scsi_pkt *
scsi_std_pktalloc(ap, cmdlen, statuslen, callback)
struct scsi_address *ap;
int cmdlen, statuslen;
int (*callback)();
{
register struct scsi_cmd *cmd;
register s;
register caddr_t cdbp, scbp;
cdbp = scbp = (caddr_t) 0;
s = splr(scsi_spl);
cmd = scsibase;
for (;;) {
if (cmd == (struct scsi_cmd *) 0) {
if (callback == SLEEP_FUNC) {
if (servicing_interrupt()) {
panic("scsi_std_pktalloc");
/*NOTREACHED*/
}
scsi_cmdwake++;
(void) sleep((caddr_t)&scsibase, PRIBIO);
cmd = scsibase;
/*
* around the top again...
*/
continue;
} else if (callback != NULL_FUNC) {
scq_store(&scpq, callback);
}
break;
}
if (cmdlen > CDB_SIZE) {
cdbp = kmem_zalloc((unsigned) cmdlen);
if (cdbp == (caddr_t) 0) {
cmd = (struct scsi_cmd *) 0;
continue;
}
}
if (statuslen > STATUS_SIZE) {
scbp = kmem_zalloc((unsigned) statuslen);
if (scbp == (caddr_t) 0) {
if (cdbp != (caddr_t) 0) {
(void) kmem_free_intr (cdbp,
(unsigned) cmdlen);
}
cmd = (struct scsi_cmd *) 0;
continue;
}
}
scsibase = (struct scsi_cmd *) cmd->cmd_pkt.pkt_ha_private;
break;
}
/*
* We can safely drop priority now
*/
(void) splx(s);
if (cmd != (struct scsi_cmd *) 0) {
bzero ((caddr_t)cmd, sizeof (struct scsi_cmd));
if (cdbp != (caddr_t) 0) {
cmd->cmd_pkt.pkt_cdbp = (opaque_t) cdbp;
cmd->cmd_flags |= CFLAG_CDBEXTERN;
} else {
cmd->cmd_pkt.pkt_cdbp = (opaque_t) &cmd->cmd_cdb[0];
}
if (scbp != (caddr_t) 0) {
cmd->cmd_pkt.pkt_scbp = (opaque_t) scbp;
cmd->cmd_flags |= CFLAG_SCBEXTERN;
} else {
cmd->cmd_pkt.pkt_scbp = (opaque_t) &cmd->cmd_scb[0];
}
cmd->cmd_cdblen = cmdlen;
cmd->cmd_scblen = statuslen;
cmd->cmd_pkt.pkt_address = *ap;
}
return ((struct scsi_pkt *) cmd);
}
/*
* packet free
*/
void
scsi_std_pktfree(pkt)
struct scsi_pkt *pkt;
{
register s = splr(scsi_spl);
register struct scsi_cmd *sp = (struct scsi_cmd *) pkt;
if (sp->cmd_flags & CFLAG_CDBEXTERN) {
(void) kmem_free_intr((caddr_t) sp->cmd_pkt.pkt_cdbp,
(unsigned int) sp->cmd_cdblen);
}
if (sp->cmd_flags & CFLAG_SCBEXTERN) {
(void) kmem_free_intr((caddr_t) sp->cmd_pkt.pkt_scbp,
(unsigned int) sp->cmd_scblen);
}
/*
* free the packet.
*/
sp->cmd_pkt.pkt_ha_private = (opaque_t) scsibase;
scsibase = (struct scsi_cmd *) pkt;
if (scsi_cmdwake) {
scsi_cmdwake = 0;
wakeup((caddr_t)&scsibase);
}
while (scpq.qlen != 0) {
register func_t funcp;
funcp = scq_retrieve(&scpq);
if ((*funcp)() == 0)
break;
}
(void) splx(s);
}
/*
*
* Dma resource allocation
*
*/
#define BDVMA ((u_long) &DVMA[0])
#define EDVMA ((u_long) &DVMA[ctob(dvmasize)])
#define DVMA_ADDR(addr, count) \
(((u_long)addr) >= BDVMA && ((u_long)addr) < EDVMA && \
(((u_long)addr)+count) >= BDVMA && (((u_long)addr)+count-1) < EDVMA)
#ifdef sun4c
#define SYS_VRANGE(addr) ((u_int)addr >= (u_int) Sysbase && \
(u_int)addr < (u_int) Syslimit)
#define SYS_VADDR(bp) (((bp->b_flags & (B_PAGEIO|B_PHYS)) == 0) && \
SYS_VRANGE(bp->b_un.b_addr))
#endif sun4c
struct scsi_pkt *
scsi_std_dmaget(pkt, dmatoken, callback)
struct scsi_pkt *pkt;
opaque_t dmatoken;
int (*callback)();
{
struct buf *bp = (struct buf *) dmatoken;
struct scsi_cmd *cmd = (struct scsi_cmd *) pkt;
/*
* clear any stale flags
*/
cmd->cmd_flags &= ~(CFLAG_DMAKEEP|CFLAG_DMASEND|CFLAG_DMAVALID);
/*
* We assume that if the address is already in the range of
* kernel address DVMA..DVMA+ctob(dvmasize) that the mapping has
* already been established by someone (so we don't have to).
*
* If this is the case, it is also true that we don't have to
* release the mapping when we're done (i.e., when scsi_std_dmafree
* is called), so we'll mark this mapping to not be released.
*
* Also, if this is a sun4c, and the I/O is to/from the kernel
* heap, we can just use that (on a sun4c, I/O is valid for what-
* ever is in context 0 (kernel context)).
*
*/
if (DVMA_ADDR(bp->b_un.b_addr, bp->b_bcount)) {
cmd->cmd_mapping = (((u_long)bp->b_un.b_addr)-((u_long)DVMA));
cmd->cmd_flags |= CFLAG_DMAKEEP;
#ifdef sun4c
} else if (SYS_VADDR(bp)) {
/*
* I don't believe that I need to lock the address range
* down if it's in the kernel heap.
*/
cmd->cmd_mapping = (u_long) bp->b_un.b_addr;
cmd->cmd_flags |= CFLAG_DMAKEEP;
#endif sun4c
} else if (callback == SLEEP_FUNC) {
cmd->cmd_mapping =
mb_mapalloc(DMA, bp, MDR_BIGSBUS, (int (*)())0, (caddr_t)0);
} else {
#ifdef TEST_SPLS
register s, ipl;
static int last_spl = -1;
if (last_spl == -1) {
last_spl = scsi_spl;
}
s = splr(last_spl);
ipl = spltoipl(last_spl) + 1;
if (ipl > spltoipl(splvm_val)) {
last_spl = scsi_spl;
} else
last_spl = ipltospl(ipl);
#else TEST_SPLS
register int s = splr(scsi_spl);
#endif TEST_SPLS
/*
* If the DVMA wait queue is empty, or we're in the middle
* of our own callback (sfield()), call mb_mapalloc for
* a mapping. If that fails, store up our caller to be
* called back later when DVMA becomes available.
*
* If the DVMA wait queue is non-empty already, store
* up our caller so it can be called later back when DVMA
* becomes available.
*
* Now if our caller had specified NULL_FUNC, we do it
* slightly differently- if we don't get resources,
* then we arrange to field a dummy callback (that
* goes nowhere).
*/
if (scdq.incallback || scdq.qlen == 0) {
cmd->cmd_mapping = mb_mapalloc(DMA, bp,
(MB_CANTWAIT | MDR_BIGSBUS),
(callback == NULL_FUNC) ? NULL_FUNC : sfield,
(caddr_t) 0);
if (cmd->cmd_mapping == 0) {
if (callback != NULL_FUNC)
scq_store(&scdq, (func_t)callback);
(void) splx(s);
return ((struct scsi_pkt *) 0);
}
} else {
if (callback != NULL_FUNC)
scq_store(&scdq, (func_t)callback);
(void) splx(s);
return ((struct scsi_pkt *) 0);
}
(void) splx(s);
}
cmd->cmd_dmacount = bp->b_bcount;
if ((bp->b_flags & B_READ) == 0)
cmd->cmd_flags |= CFLAG_DMASEND;
cmd->cmd_flags |= CFLAG_DMAVALID;
return ((struct scsi_pkt *) cmd);
}
/*ARGSUSED*/
static int
sfield (arg)
caddr_t arg;
{
register s = splr(scsi_spl);
scdq.incallback = 1;
while (scdq.qlen != 0) {
register func_t funcp;
register u_int lastlen;
/*
* Latch up the current queue length,
* because scq_retrieve will decrement it.
*/
lastlen = scdq.qlen;
funcp = scq_retrieve(&scdq);
if ((*funcp)() == 0) {
/*
* The target driver's allocation failed. Why?
* If it failed due to packet allocation failure,
* we can continue on. If it failed due to DVMA
* allocation failure, we have to quit now and
* let the mb code know that DVMA has run out
* again. If the last dma queue length is less
* than or equal to the now current dma queue
* length, then the allocation failure was
* due to DVMA running out again.
*/
if (lastlen > scdq.qlen) {
continue;
}
scdq.incallback = 0;
(void) splx(s);
return (DVMA_RUNOUT);
}
}
scdq.incallback = 0;
(void) splx(s);
return (0);
}
void
scsi_std_dmafree(pkt)
struct scsi_pkt *pkt;
{
struct scsi_cmd *cmd = (struct scsi_cmd *) pkt;
/*
* we don't need an spl here because mb_mapfree does that for us.
*/
if ((cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
return;
}
if ((cmd->cmd_flags & CFLAG_DMAKEEP) == 0) {
#ifdef TEST_SPLS
register s, ipl;
static int last_spl = -1;
if (last_spl == -1) {
last_spl = scsi_spl;
}
s = splr(last_spl);
ipl = spltoipl(last_spl) + 1;
if (ipl > spltoipl(splvm_val)) {
last_spl = scsi_spl;
} else
last_spl = ipltospl(ipl);
mb_mapfree(DMA, (int *)&cmd->cmd_mapping);
(void) splx(s);
#else TEST_SPLS
mb_mapfree(DMA, (int *)&cmd->cmd_mapping);
#endif TEST_SPLS
#if defined(sun4c) && defined(VAC)
} else if (vac && (cmd->cmd_flags & CFLAG_DMASEND) == 0 &&
SYS_VRANGE(cmd->cmd_mapping)) {
extern u_int hat_getkpfnum();
extern void hat_vacsync();
register addr_t vacaddr;
vacaddr = (addr_t) (cmd->cmd_mapping & MMU_PAGEMASK);
while (vacaddr < (addr_t)
(cmd->cmd_mapping + cmd->cmd_dmacount)) {
hat_vacsync(hat_getkpfnum(vacaddr));
vacaddr += MMU_PAGESIZE;
}
#endif defined(sun4c) && defined(VAC)
}
cmd->cmd_flags &= ~CFLAG_DMAVALID;
cmd->cmd_mapping = cmd->cmd_dmacount = 0;
}
/*
* Store a queue element, returning if the funcp is already queued.
* Always called at splvm().
*/
static void
scq_store(scq, funcp)
register struct scq *scq;
func_t funcp;
{
register int i;
for (i = 0; i < SCQLEN; i++) {
if (scq->funcp[i] == funcp) {
return;
}
}
scq->ncalls++;
scq->qlen++;
scq->funcp[scq->qstore] = funcp;
scq->qstore = (scq->qstore + 1) & SCQLENMASK;
ASSERT(scq->qstore != scq->qretrv);
}
/*
* Retrieve the queue element at the head of the wait queue.
* Always called at splvm().
*/
static func_t
scq_retrieve(scq)
register struct scq *scq;
{
register func_t funcp;
scq->qlen--;
scq->qretrv = (scq->qretrv + 1) & SCQLENMASK;
ASSERT(scq->qretrv != scq->qstore);
funcp = scq->funcp[scq->qretrv];
scq->funcp[scq->qretrv] = NULL;
return (funcp);
}
/*
* Check a passed active data pointer for being within range
*/
int
scsi_chkdma(sp, max_xfer)
register struct scsi_cmd *sp;
register int max_xfer;
{
register u_long maxv = max_xfer;
if (sp->cmd_data < sp->cmd_mapping)
return (0);
else if (sp->cmd_data >= (sp->cmd_mapping + sp->cmd_dmacount))
return (0);
else if ((sp->cmd_data + maxv) >= sp->cmd_mapping+sp->cmd_dmacount) {
return (sp->cmd_mapping + sp->cmd_dmacount - sp->cmd_data);
} else {
return ((int)maxv);
}
}

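A sketch (not part of the file above) of the allocate/build/transport/free flow a target driver would run through these routines; the 6-byte cdb length, single status byte and Group 0 READ opcode 0x08 are illustrative, and a real driver would also set pkt_comp and a timeout.

#include <scsi/scsi.h>

/* Hypothetical fragment: allocate resources for a buf and start a read. */
int
tgt_start_read(devp, bp, blkno, nblk)
	struct scsi_device *devp;
	struct buf *bp;
	int blkno, nblk;
{
	register struct scsi_pkt *pkt;

	/* 6-byte cdb, 1 status byte; sleep until resources are available */
	pkt = scsi_resalloc(&devp->sd_address, 6, 1, (opaque_t) bp, SLEEP_FUNC);
	if (pkt == (struct scsi_pkt *) 0)
		return (-1);
	/* 0x08 is the standard Group 0 READ opcode, shown only as an example */
	makecom_g0(pkt, devp, 0, 0x08, blkno, nblk);
	if (pkt_transport(pkt) != TRAN_ACCEPT) {
		scsi_resfree(pkt);
		return (-1);
	}
	return (0);
}
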
sys/scsi/impl/scsi_subr.c

#ident "@(#)scsi_subr.c 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 Sun Microsystems, Inc.
*/
#include <scsi/scsi.h>
/*
*
* Utility SCSI routines
*
*/
/*
* Polling support routines
*/
/*
* The polling command routine still needs work
*/
static int scsi_poll_busycnt = 60;
int
scsi_poll(pkt)
struct scsi_pkt *pkt;
{
register busy_count, rval = -1, savef;
void (*savec)();
/*
* save old flags..
*/
savef = pkt->pkt_flags;
savec = pkt->pkt_comp;
pkt->pkt_flags |= FLAG_NOINTR;
pkt->pkt_comp = scsi_pollintr;
for (busy_count = 0; busy_count < scsi_poll_busycnt; busy_count++) {
if (pkt_transport(pkt) != TRAN_ACCEPT) {
break;
}
if (pkt->pkt_reason == CMD_INCOMPLETE && pkt->pkt_state == 0) {
DELAY(10000);
} else if (pkt->pkt_reason != CMD_CMPLT) {
break;
} else if (((*pkt->pkt_scbp)&STATUS_MASK) == STATUS_BUSY) {
DELAY(1000000);
} else {
rval = 0;
break;
}
}
pkt->pkt_flags = savef;
pkt->pkt_comp = savec;
if (busy_count >= scsi_poll_busycnt && rval == 0)
return (busy_count);
else
return (rval);
}
/*ARGSUSED*/
void
scsi_pollintr(pkt)
struct scsi_pkt *pkt;
{
}
/*
* Command packaging routines (here for compactness rather than speed)
*/
void
makecom_g0(pkt, devp, flag, cmd, addr, cnt)
struct scsi_pkt *pkt;
struct scsi_device *devp;
int flag, cmd, addr, cnt;
{
MAKECOM_G0(pkt, devp, flag, cmd, addr, cnt);
}
void
makecom_g0_s(pkt, devp, flag, cmd, cnt, fixbit)
struct scsi_pkt *pkt;
struct scsi_device *devp;
int flag, cmd, cnt, fixbit;
{
MAKECOM_G0_S(pkt, devp, flag, cmd, cnt, fixbit);
}
void
makecom_g1(pkt, devp, flag, cmd, addr, cnt)
struct scsi_pkt *pkt;
struct scsi_device *devp;
int flag, cmd, addr, cnt;
{
MAKECOM_G1(pkt, devp, flag, cmd, addr, cnt);
}
void
makecom_g5(pkt, devp, flag, cmd, addr, cnt)
struct scsi_pkt *pkt;
struct scsi_device *devp;
int flag, cmd, addr, cnt;
{
MAKECOM_G5(pkt, devp, flag, cmd, addr, cnt);
}
/*
* Common iopbmap data area packet allocation routines
*/
struct scsi_pkt *
get_pktiopb(ap, datap, cdblen, statuslen, datalen, readflag, func)
struct scsi_address *ap;
int cdblen, statuslen, datalen;
caddr_t *datap;
int readflag;
int (*func)();
{
struct scsi_pkt *pkt = (struct scsi_pkt *) 0;
struct buf local;
if ((func != SLEEP_FUNC && func != NULL_FUNC) || !datap)
return (pkt);
*datap = (caddr_t) 0;
bzero ((caddr_t) &local, sizeof (struct buf));
if ((local.b_un.b_addr = IOPBALLOC(datalen)) == (caddr_t) 0) {
return (pkt);
} else if (readflag)
local.b_flags = B_READ;
local.b_bcount = datalen;
pkt = scsi_resalloc(ap, cdblen, statuslen, (caddr_t)&local, func);
if (!pkt) {
IOPBFREE(local.b_un.b_addr, datalen);
} else {
*datap = local.b_un.b_addr;
}
return (pkt);
}
/*
* Equivalent deallocation wrapper
*/
void
free_pktiopb(pkt, datap, datalen)
struct scsi_pkt *pkt;
caddr_t datap;
int datalen;
{
if (datap && datalen) {
IOPBFREE(datap, datalen);
}
scsi_resfree(pkt);
}
/*
* Routine to convert a transport structure into a address cookie
*/
int
scsi_cookie(tranp)
struct scsi_transport *tranp;
{
return ((int) tranp);
}
/*
* Common naming functions
*/
static char scsi_tmpname[32];
char *
scsi_dname(dtyp)
int dtyp;
{
static char *dnames[] = {
"Direct Access",
"Sequential Access",
"Printer",
"Processor",
"Write-Once/Read-Many",
"Read-Only Direct Access",
"Scanner",
"Optical",
"Changer",
"Communications"
};
if ((dtyp & DTYPE_MASK) <= DTYPE_COMM) {
return (dnames[dtyp&DTYPE_MASK]);
} else if (dtyp == DTYPE_NOTPRESENT) {
return ("Not Present");
}
return (sprintf(scsi_tmpname,
"<unknown device type 0x%x>", (u_int) dtyp));
}
char *
scsi_rname(reason)
u_char reason;
{
static char *rnames[] = {
"cmplt",
"incomplete",
"dma_derr",
"tran_err",
"reset",
"aborted",
"timeout",
"data_ovr",
"ovr",
"sts_ovr",
"badmsg",
"nomsgout",
"xid_fail",
"ide_fail",
"abort_fail",
"reject_fail",
"nop_fail",
"per_fail",
"bdr_fail",
"id_fail",
"unexpected_bus_free"
};
if (reason > CMD_UNX_BUS_FREE) {
return (sprintf(scsi_tmpname, "<unkown reason %x>", reason));
} else {
return (rnames[reason]);
}
}
char *
scsi_mname(msg)
u_char msg;
{
static char *imsgs[18] = {
"COMMAND COMPLETE",
"EXTENDED",
"SAVE DATA POINTER",
"RESTORE POINTERS",
"DISCONNECT",
"INITIATOR DETECTED ERROR",
"ABORT",
"REJECT",
"NO-OP",
"MESSAGE PARITY",
"LINKED COMMAND COMPLETE",
"LINKED COMMAND COMPLETE (W/FLAG)",
"BUS DEVICE RESET",
"ABORT TAG",
"CLEAR QUEUE",
"INITIATE RECOVERY",
"RELEASE RECOVERY",
"TERMINATE PROCESS"
};
static char *imsgs_2[4] = {
"SIMPLE QUEUE TAG",
"HEAD OF QUEUE TAG",
"ORDERED QUEUE TAG",
"IGNORE WIDE RESIDUE"
};
if (msg < 18) {
return (imsgs[msg]);
} else if (IS_IDENTIFY_MSG(msg)) {
return ("IDENTIFY");
} else if (IS_2BYTE_MSG(msg) && ((msg) & 0x0F) < 4) {
return (imsgs_2[msg & 0x0F]);
} else {
return (sprintf(scsi_tmpname, "<unknown msg 0x%x>", msg));
}
}
char *
scsi_cmd_decode(cmd, cmdvec)
u_char cmd;
register char **cmdvec;
{
while (*cmdvec != (char *) 0) {
if (cmd == (u_char) **cmdvec) {
return ((char *)((int)(*cmdvec)+1));
}
cmdvec++;
}
return (sprintf(scsi_tmpname, "<undecoded cmd 0x%x>", cmd));
}

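An illustration (not part of the file above) of the table format scsi_cmd_decode() expects: each string's first byte is the opcode, the rest is the printable name, and a null pointer ends the list. The opcodes shown are the standard TEST UNIT READY, REQUEST SENSE, READ and WRITE values; the table itself is hypothetical.

static char *example_cmds[] = {
	"\000test unit ready",
	"\003request sense",
	"\010read",
	"\012write",
	(char *) 0
};
/* e.g. scsi_cmd_decode(0x08, example_cmds) returns "read" */
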
sys/scsi/impl/scsi_transport.c

#ifndef lint
static char sccsid[] = "@(#)scsi_transport.c 1.1 94/10/31 SMI";
#endif lint
/*
* Copyright (C) 1989 Sun Microsystems, Inc.
*/
/****************************************************************
* *
* Main Transport Routines *
* *
****************************************************************/
#include <scsi/scsi.h>
int
pkt_transport(pkt)
struct scsi_pkt *pkt;
{
struct scsi_transport *tranp;
tranp = (struct scsi_transport *) pkt->pkt_address.a_cookie;
return (*tranp->tran_start)((struct scsi_cmd *)pkt);
}

sys/scsi/impl/sense.h

#ident "@(#)sense.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 by Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_sense_h
#define _scsi_impl_sense_h
/*
* Implementation Variant defines
* for SCSI Sense Information
*/
/*
* These are 'pseudo' sense keys for common Sun implementation driver
* detected errors. Note that they start out as being higher than the
* legal key numbers for standard SCSI.
*/
#define SUN_KEY_FATAL 0x10 /* driver, scsi handshake failure */
#define SUN_KEY_TIMEOUT 0x11 /* driver, command timeout */
#define SUN_KEY_EOF 0x12 /* driver, eof hit */
#define SUN_KEY_EOT 0x13 /* driver, eot hit */
#define SUN_KEY_LENGTH 0x14 /* driver, length error */
#define SUN_KEY_BOT 0x15 /* driver, bot hit */
#define SUN_KEY_WRONGMEDIA 0x16 /* driver, wrong tape media */
#define NUM_IMPL_SENSE_KEYS 7 /* seven extra keys */
/*
* Common sense length allocation sufficient for this implementation.
*/
#define SENSE_LENGTH \
(roundup(sizeof (struct scsi_extended_sense), sizeof (long)))
/*
* Minimum useful Sense Length value
*/
#define SUN_MIN_SENSE_LENGTH 4
/*
* Specific variants to the Extended Sense structure.
*
* Defines for:
* Emulex MD21 SCSI/ESDI Controller
* Emulex MT02 SCSI/QIC-36 Controller.
*
* 1) The Emulex controllers put error class and error code into the byte
* right after the 'additional sense length' field in Extended Sense.
*
* 2) Except that some people state that this isn't so for the MD21- only
* the MT02.
*/
#define emulex_ercl_ercd es_cmd_info[0]
/*
* 3) These are valid on Extended Sense for the MD21, FORMAT command only:
*/
#define emulex_cyl_msb es_info_1
#define emulex_cyl_lsb es_info_2
#define emulex_head_num es_info_3
#define emulex_sect_num es_info_4
#endif /* !_scsi_impl_sense_h */

sys/scsi/impl/services.h

#ident "@(#)services.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1989, 1990 by Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_services_h
#define _scsi_impl_services_h
/*
* Implementation services not classified by type
*/
#ifdef KERNEL
extern int scsi_poll();
extern void scsi_pollintr();
extern struct scsi_pkt *get_pktiopb();
extern void free_pktiopb();
extern char *scsi_dname(), *scsi_rname(), *scsi_cmd_decode(), *scsi_mname();
extern char *sprintf();
extern char *state_bits, *sense_keys[NUM_SENSE_KEYS + NUM_IMPL_SENSE_KEYS];
/*
* Common Capability Strings Array
*/
extern char *scsi_capstrings[];
#define SCSI_CAP_DMA_MAX 0
#define SCSI_CAP_MSG_OUT 1
#define SCSI_CAP_DISCONNECT 2
#define SCSI_CAP_SYNCHRONOUS 3
#define SCSI_CAP_WIDE_XFER 4
#define SCSI_CAP_PARITY 5
#define SCSI_CAP_INITIATOR_ID 6
#define SCSI_CAP_UNTAGGED_QING 7
#define SCSI_CAP_TAGGED_QING 8
#endif /* KERNEL */
#endif /* _scsi_impl_services_h */

sys/scsi/impl/status.h

/* @(#)status.h 1.1 94/10/31 SMI */
#ifndef _scsi_impl_status_h
#define _scsi_impl_status_h
/*
* Copyright (c) 1988, 1989 Sun Microsystems, Inc.
*/
/*
* Implementation specific SCSI status definitions
*
*/
/*
* The size of a status block (much more than is really needed...)
*/
#define STATUS_SIZE 3
/*
* This structure is in violation of the SCSI spec, but someone
* has claimed to need it.
*/
struct scsi_funky_scb { /* funky scsi status block */
u_char sts_resvd : 1, /* reserved */
#define sts_ext_st1 sts_resvd /* extended status (next byte valid) */
sts_vu7 : 1, /* vendor unique */
sts_vu6 : 1, /* vendor unique */
sts_is : 1, /* intermediate status sent */
sts_busy : 1, /* device busy or reserved */
sts_cm : 1, /* condition met */
sts_chk : 1, /* check condition */
sts_vu0 : 1; /* vendor unique */
u_char sts_ext_st2 : 1, /* extended status (next byte valid) */
: 6, /* reserved */
sts_ha_err : 1; /* host adapter detected error */
u_char sts_byte2; /* third byte */
};
#endif /* _scsi_impl_status_h */

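A sketch (outside the header itself) of testing the bits above after a command completes; it assumes pkt_scbp points at a struct scsi_funky_scb, which only holds for host adapters that actually use this layout.

#include <scsi/scsi.h>

/* Hypothetical fragment: 0 = good, 1 = busy (retry later), 2 = check condition. */
int
tgt_check_status(pkt)
	struct scsi_pkt *pkt;
{
	register struct scsi_funky_scb *scb =
	    (struct scsi_funky_scb *) pkt->pkt_scbp;

	if (scb->sts_busy)
		return (1);
	if (scb->sts_chk)
		return (2);
	return (0);
}
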
sys/scsi/impl/transport.h

#ident "@(#)transport.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1989, 1990 by Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_transport_h
#define _scsi_impl_transport_h
#include <scsi/scsi_types.h>
/*
* Attribution: Greg Slaughter
*/
/*
* SCSI transport structures
*
* As each Host Adapter makes itself known to the system,
* it will create and register with the library the structure
* described below. This is so that the library knows how to route
* packets, resource control requests, and capability requests
* for any particular host adapter. The 'a_cookie' field of a
* scsi_address structure made known to a Target driver will
* point to one of these transport structures.
*
* The functional interfaces defined below follow, and are expected
* to follow, the Sun SCSI specification. They are the implementation.
*
*/
struct scsi_transport {
int tran_spl; /* 'splx' interrupt mask */
int (*tran_start)(); /* transport start */
int (*tran_reset)(); /* transport reset */
int (*tran_abort)(); /* transport abort */
int (*tran_getcap)(); /* capability retrieval */
int (*tran_setcap)(); /* capability establishment */
struct scsi_pkt *(*tran_pktalloc)(); /* packet allocation */
struct scsi_pkt *(*tran_dmaget)(); /* dma allocation */
void (*tran_pktfree)(); /* packet deallocation */
void (*tran_dmafree)(); /* dma deallocation */
};
/*
* This implementation provides some 'standard' allocation and
* deallocation functions. Host Adapters may provide their own
* functions, but if they use the standard functions, they
* must use all of them.
*/
#ifdef KERNEL
extern struct scsi_pkt *scsi_std_pktalloc();
extern struct scsi_pkt *scsi_std_dmaget();
extern void scsi_std_pktfree();
extern void scsi_std_dmafree();
extern void scsi_rinit();
extern int scsi_cookie();
#endif /* KERNEL */
#endif /* _scsi_impl_transport_h */

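A sketch (outside the header itself) of how a host adapter might fill in and export this structure using the standard allocation routines; the ha_* entry points are hypothetical, and per the note above a driver that uses any of the scsi_std_* functions must use all of them.

#include <scsi/scsi.h>

extern int ha_start(), ha_reset(), ha_abort(), ha_getcap(), ha_setcap();

static struct scsi_transport ha_transport = {
	0,			/* tran_spl, set at attach time */
	ha_start,		/* tran_start */
	ha_reset,		/* tran_reset */
	ha_abort,		/* tran_abort */
	ha_getcap,		/* tran_getcap */
	ha_setcap,		/* tran_setcap */
	scsi_std_pktalloc,	/* tran_pktalloc */
	scsi_std_dmaget,	/* tran_dmaget */
	scsi_std_pktfree,	/* tran_pktfree */
	scsi_std_dmafree	/* tran_dmafree */
};
/* an address's a_cookie for this adapter would then be scsi_cookie(&ha_transport) */
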
sys/scsi/impl/types.h

#ident "@(#)types.h 1.1 94/10/31 SMI"
/*
* Copyright (c) 1988, 1989, 1990 by Sun Microsystems, Inc.
*/
#ifndef _scsi_impl_types_h
#define _scsi_impl_types_h
/*
* Local Types for SCSI subsystems
*/
#include <sys/param.h>
#ifdef KERNEL
#include <sys/systm.h>
#endif KERNEL
#include <sys/dk.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/user.h>
#include <sys/fcntl.h>
#include <sys/map.h>
#include <sys/vmmac.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <sys/syslog.h>
#ifdef KERNEL
#include <sys/kernel.h>
#endif KERNEL
#include <sys/dkbad.h>
#include <machine/pte.h>
#include <machine/psl.h>
#include <machine/mmu.h>
#ifdef KERNEL
#include <machine/cpu.h>
#endif
#include <machine/scb.h>
#include <sun/dklabel.h>
#include <sun/dkio.h>
#ifdef sun4c
#include <sundev/mbvar.h>
#else sun4c
#include <sys/buf.h>
#include <sundev/mbvar.h>
#endif sun4c
#include <sys/debug.h>
#include <scsi/impl/services.h>
#include <scsi/impl/transport.h>
#include <scsi/impl/pkt_wrapper.h>
#include <scsi/conf/autoconf.h>
#include <scsi/conf/device.h>
#endif /* !_scsi_impl_types_h */

sys/scsi/impl/uscsi.h

/*
* @(#)uscsi.h 1.1 94/10/31 Copyright (c) 1989 Sun Microsystems, Inc.
*/
/*
*
* Defines for user SCSI commands
*
*/
#ifndef _scsi_impl_uscsi_h
#define _scsi_impl_uscsi_h
/*
* Copyright (c) 1989 Sun Microsystems, Inc.
*/
/*
* definition for user-scsi command structure
*/
struct uscsi_cmd {
caddr_t uscsi_cdb;
int uscsi_cdblen;
caddr_t uscsi_bufaddr;
int uscsi_buflen;
unsigned char uscsi_status;
int uscsi_flags;
};
/*
* flags for uscsi_flags field
*/
#define USCSI_SILENT 0x01 /* no error messages */
#define USCSI_DIAGNOSE 0x02 /* fail if any error occurs */
#define USCSI_ISOLATE 0x04 /* isolate from normal commands */
#define USCSI_READ 0x08 /* get data from device */
#define USCSI_WRITE 0xFFF7 /* use to zero the READ bit in uscsi_flags */
/*
* User SCSI io control command
*/
#define USCSICMD _IOWR(u, 1, struct uscsi_cmd) /* user scsi command */
/*
* user scsi status bit masks
*/
#define USCSI_STATUS_GOOD 0x00
#define USCSI_STATUS_CHECK 0x02
#define USCSI_STATUS_MET 0x04
#define USCSI_STATUS_BUSY 0x08
#define USCSI_STATUS_INTERMEDIATE 0x10
#define USCSI_STATUS_RESERVATION_CONFLICT \
(USCSI_STATUS_INTERMEDIATE | USCSI_STATUS_BUSY)
#endif /* _scsi_impl_uscsi_h */
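
A user-level sketch (outside the header itself) of issuing a command through USCSICMD; the device path, the choice of an all-zero 6-byte cdb (TEST UNIT READY), and the assumption that the target driver on that node honors the ioctl are all illustrative.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <scsi/impl/uscsi.h>

int
main()
{
	struct uscsi_cmd ucmd;
	char cdb[6];
	register int i, fd;

	fd = open("/dev/rsd0c", O_RDONLY);	/* hypothetical raw device node */
	if (fd < 0)
		return (1);
	for (i = 0; i < 6; i++)
		cdb[i] = 0;			/* all-zero cdb: TEST UNIT READY */
	ucmd.uscsi_cdb = cdb;
	ucmd.uscsi_cdblen = 6;
	ucmd.uscsi_bufaddr = (caddr_t) 0;	/* no data phase */
	ucmd.uscsi_buflen = 0;
	ucmd.uscsi_status = 0;
	ucmd.uscsi_flags = USCSI_SILENT;
	if (ioctl(fd, USCSICMD, &ucmd) < 0)
		return (1);
	return (ucmd.uscsi_status != USCSI_STATUS_GOOD);
}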