1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
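/*
 * Illustrative usage only (not part of the driver): the two option styles
 * mentioned above look like this, with parameter names matching the DEF_*
 * defaults further below.
 *
 *   modprobe scsi_debug num_tgts=2 max_luns=2 dev_size_mb=8
 *
 * or, when built in, on the kernel command line:
 *
 *   scsi_debug.num_tgts=2 scsi_debug.max_luns=2 scsi_debug.dev_size_mb=8
 */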
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
89 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
90 #define CAPACITY_CHANGED_ASCQ 0x9
91 #define SAVING_PARAMS_UNSUP 0x39
92 #define TRANSPORT_PROBLEM 0x4b
93 #define THRESHOLD_EXCEEDED 0x5d
94 #define LOW_POWER_COND_ON 0x5e
95 #define MISCOMPARE_VERIFY_ASC 0x1d
96 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
97 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
98
99 /* Additional Sense Code Qualifier (ASCQ) */
100 #define ACK_NAK_TO 0x3
101
102
103 /* Default values for driver parameters */
104 #define DEF_NUM_HOST   1
105 #define DEF_NUM_TGTS   1
106 #define DEF_MAX_LUNS   1
107 /* With these defaults, this driver will make 1 host with 1 target
108  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
109  */
110 #define DEF_ATO 1
111 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
112 #define DEF_DEV_SIZE_MB   8
113 #define DEF_DIF 0
114 #define DEF_DIX 0
115 #define DEF_D_SENSE   0
116 #define DEF_EVERY_NTH   0
117 #define DEF_FAKE_RW     0
118 #define DEF_GUARD 0
119 #define DEF_HOST_LOCK 0
120 #define DEF_LBPU 0
121 #define DEF_LBPWS 0
122 #define DEF_LBPWS10 0
123 #define DEF_LBPRZ 1
124 #define DEF_LOWEST_ALIGNED 0
125 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
126 #define DEF_NO_LUN_0   0
127 #define DEF_NUM_PARTS   0
128 #define DEF_OPTS   0
129 #define DEF_OPT_BLKS 64
130 #define DEF_PHYSBLK_EXP 0
131 #define DEF_PTYPE   0
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DELAY_OVERRIDDEN -9999
144
145 /* bit mask values for scsi_debug_opts */
146 #define SCSI_DEBUG_OPT_NOISE   1
147 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
148 #define SCSI_DEBUG_OPT_TIMEOUT   4
149 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
150 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
151 #define SCSI_DEBUG_OPT_DIF_ERR   32
152 #define SCSI_DEBUG_OPT_DIX_ERR   64
153 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
154 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
155 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
156 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
157 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
158 #define SCSI_DEBUG_OPT_N_WCE    0x1000
159 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
160 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
161 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
162 /* When "every_nth" > 0 then modulo "every_nth" commands:
163  *   - no response (i.e. a timeout) is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
164  *   - a RECOVERED_ERROR is simulated on successful read and write
165  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
166  *   - a TRANSPORT_ERROR is simulated on successful read and write
167  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
168  *
169  * When "every_nth" < 0 then after "- every_nth" commands:
170  *   - no response (i.e. a timeout) is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
171  *   - a RECOVERED_ERROR is simulated on successful read and write
172  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
173  *   - a TRANSPORT_ERROR is simulated on successful read and write
174  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
175  * This will continue until some other action occurs (e.g. the user
176  * writing a new value (other than -1 or 1) to every_nth via sysfs).
177  */
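/*
 * A concrete sketch of the above (assuming "opts" and "every_nth" are
 * exposed as writable module parameters, e.g. under
 * /sys/module/scsi_debug/parameters/):
 *
 *   echo 4   > /sys/module/scsi_debug/parameters/opts       # SCSI_DEBUG_OPT_TIMEOUT
 *   echo 100 > /sys/module/scsi_debug/parameters/every_nth
 *
 * should make every 100th command go unanswered until every_nth (or opts)
 * is changed again.
 */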
178
179 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
180  * priority order. In the subset implemented here lower numbers have higher
181  * priority. The UA numbers should be a sequence starting from 0 with
182  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
183 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
184 #define SDEBUG_UA_BUS_RESET 1
185 #define SDEBUG_UA_MODE_CHANGED 2
186 #define SDEBUG_UA_CAPACITY_CHANGED 3
187 #define SDEBUG_UA_LUNS_CHANGED 4
188 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
189 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
190 #define SDEBUG_NUM_UAS 7
191
192 /* for check_readiness() */
193 #define UAS_ONLY 1      /* check for UAs only */
194 #define UAS_TUR 0       /* if no UAs then check if media access possible */
195
196 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
197  * sector on read commands: */
198 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
199 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
200
201 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
202  * or "peripheral device" addressing (value 0) */
203 #define SAM2_LUN_ADDRESS_METHOD 0
204 #define SAM2_WLUN_REPORT_LUNS 0xc101
205
206 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
207  * (for response) at one time. Can be reduced by max_queue option. Command
208  * responses are not queued when delay=0 and ndelay=0. The per-device
209  * DEF_CMD_PER_LUN can be changed via sysfs:
210  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
211  * SCSI_DEBUG_CANQUEUE. */
212 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD is bits in a long */
213 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
214 #define DEF_CMD_PER_LUN  255
215
216 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
217 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
218 #endif
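/*
 * Usage sketch for the sysfs knob mentioned above; <h:c:t:l> stands for the
 * host:channel:target:lun tuple of the simulated device and is not a
 * literal path component:
 *
 *   echo 32 > /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 *
 * Values above SCSI_DEBUG_CANQUEUE cannot take effect, since at most that
 * many commands can be queued for response at one time.
 */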
219
220 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
221 enum sdeb_opcode_index {
222         SDEB_I_INVALID_OPCODE = 0,
223         SDEB_I_INQUIRY = 1,
224         SDEB_I_REPORT_LUNS = 2,
225         SDEB_I_REQUEST_SENSE = 3,
226         SDEB_I_TEST_UNIT_READY = 4,
227         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
228         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
229         SDEB_I_LOG_SENSE = 7,
230         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
231         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
232         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
233         SDEB_I_START_STOP = 11,
234         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
235         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
236         SDEB_I_MAINT_IN = 14,
237         SDEB_I_MAINT_OUT = 15,
238         SDEB_I_VERIFY = 16,             /* 10 only */
239         SDEB_I_VARIABLE_LEN = 17,
240         SDEB_I_RESERVE = 18,            /* 6, 10 */
241         SDEB_I_RELEASE = 19,            /* 6, 10 */
242         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
243         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
244         SDEB_I_ATA_PT = 22,             /* 12, 16 */
245         SDEB_I_SEND_DIAG = 23,
246         SDEB_I_UNMAP = 24,
247         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
248         SDEB_I_WRITE_BUFFER = 26,
249         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
250         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
251         SDEB_I_COMP_WRITE = 29,
252         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
253 };
254
255 static const unsigned char opcode_ind_arr[256] = {
256 /* 0x0; 0x0->0x1f: 6 byte cdbs */
257         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
258             0, 0, 0, 0,
259         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
260         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
261             SDEB_I_RELEASE,
262         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
263             SDEB_I_ALLOW_REMOVAL, 0,
264 /* 0x20; 0x20->0x3f: 10 byte cdbs */
265         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
266         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
267         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
268         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
269 /* 0x40; 0x40->0x5f: 10 byte cdbs */
270         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
271         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
272         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
273             SDEB_I_RELEASE,
274         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
275 /* 0x60; 0x60->0x7d are reserved */
276         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
277         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278         0, SDEB_I_VARIABLE_LEN,
279 /* 0x80; 0x80->0x9f: 16 byte cdbs */
280         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
281         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
282         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
283         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
284 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
285         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
286              SDEB_I_MAINT_OUT, 0, 0, 0,
287         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
288              0, 0, 0, 0,
289         0, 0, 0, 0, 0, 0, 0, 0,
290         0, 0, 0, 0, 0, 0, 0, 0,
291 /* 0xc0; 0xc0->0xff: vendor specific */
292         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
293         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 };
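/*
 * Illustrative fragment (not a function in this file) of how the table above
 * is meant to be used: the first CDB byte selects an SDEB_I_* index, which
 * in turn selects an entry of opcode_info_arr[] defined further below.
 *
 *   u8 opcode = scp->cmnd[0];
 *   int idx = opcode_ind_arr[opcode];    // 0 means SDEB_I_INVALID_OPCODE
 *   const struct opcode_info_t *oip = &opcode_info_arr[idx];
 */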
297
298 #define F_D_IN                  1
299 #define F_D_OUT                 2
300 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
301 #define F_D_UNKN                8
302 #define F_RL_WLUN_OK            0x10
303 #define F_SKIP_UA               0x20
304 #define F_DELAY_OVERR           0x40
305 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
306 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
307 #define F_INV_OP                0x200
308 #define F_FAKE_RW               0x400
309 #define F_M_ACCESS              0x800   /* media access */
310
311 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
312 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
313 #define FF_SA (F_SA_HIGH | F_SA_LOW)
314
315 struct sdebug_dev_info;
316 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
337
338 struct opcode_info_t {
339         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
340                                  * for terminating element */
341         u8 opcode;              /* if num_attached > 0, preferred */
342         u16 sa;                 /* service action */
343         u32 flags;              /* OR-ed set of F_* and FF_* flags */
344         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
345         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
346         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
347                                 /* ignore cdb bytes after position 15 */
348 };
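/*
 * Sketch of how len_mask[] is meant to be read (illustrative only, assuming
 * a strict-mode style check): len_mask[0] holds the expected CDB length and
 * each following byte masks the bits that may legitimately be set in the
 * corresponding CDB byte.
 *
 *   int k, len = oip->len_mask[0];
 *   for (k = 1; k < len && k < 16; ++k)
 *           if (scp->cmnd[k] & ~oip->len_mask[k])
 *                   break;   // byte k has a bit set outside the allowed mask
 */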
349
350 static const struct opcode_info_t msense_iarr[1] = {
351         {0, 0x1a, 0, F_D_IN, NULL, NULL,
352             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
353 };
354
355 static const struct opcode_info_t mselect_iarr[1] = {
356         {0, 0x15, 0, F_D_OUT, NULL, NULL,
357             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358 };
359
360 static const struct opcode_info_t read_iarr[3] = {
361         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
362             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
363              0, 0, 0, 0} },
364         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
365             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
366         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
367             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
368              0xc7, 0, 0, 0, 0} },
369 };
370
371 static const struct opcode_info_t write_iarr[3] = {
372         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
373             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
374              0, 0, 0, 0} },
375         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
376             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
377         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
378             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
379              0xc7, 0, 0, 0, 0} },
380 };
381
382 static const struct opcode_info_t sa_in_iarr[1] = {
383         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
384             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
385              0xff, 0xff, 0xff, 0, 0xc7} },
386 };
387
388 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
389         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
390             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
391                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
392 };
393
394 static const struct opcode_info_t maint_in_iarr[2] = {
395         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
396             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
397              0xc7, 0, 0, 0, 0} },
398         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
399             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
400              0, 0} },
401 };
402
403 static const struct opcode_info_t write_same_iarr[1] = {
404         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
405             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
406              0xff, 0xff, 0xff, 0x1f, 0xc7} },
407 };
408
409 static const struct opcode_info_t reserve_iarr[1] = {
410         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
411             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
412 };
413
414 static const struct opcode_info_t release_iarr[1] = {
415         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
416             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
417 };
418
419
420 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
421  * plus the terminating elements for logic that scans this table such as
422  * REPORT SUPPORTED OPERATION CODES. */
423 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
424 /* 0 */
425         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
426             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
428             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
430             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
431              0, 0} },
432         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
433             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
434         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
435             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
436         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
437             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
438              0} },
439         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
440             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
441         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
442             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
443              0, 0, 0} },
444         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
445             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
446              0, 0} },
447         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
448             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
449              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
450 /* 10 */
451         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
452             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
453              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
454         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
455             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
456         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
457             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
458              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
459         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
460             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
461         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
462             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
463              0} },
464         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
465             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
466         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
467             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
469             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
470                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
471         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
472             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
473              0} },
474         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
475             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
476              0} },
477 /* 20 */
478         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
479             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
480         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
481             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
483             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
485             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
487             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
488         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
489             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
490                    0, 0, 0, 0, 0, 0} },
491         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
492             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
493              0, 0, 0, 0} },                     /* WRITE_BUFFER */
494         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
495             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
496                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
497         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
498             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
499              0, 0, 0, 0} },
500         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
501             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
502              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
503
504 /* 30 */
505         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
506             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
507 };
508
509 struct sdebug_scmd_extra_t {
510         bool inj_recovered;
511         bool inj_transport;
512         bool inj_dif;
513         bool inj_dix;
514         bool inj_short;
515 };
516
517 static int scsi_debug_add_host = DEF_NUM_HOST;
518 static int scsi_debug_ato = DEF_ATO;
519 static int scsi_debug_delay = DEF_DELAY;
520 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
521 static int scsi_debug_dif = DEF_DIF;
522 static int scsi_debug_dix = DEF_DIX;
523 static int scsi_debug_dsense = DEF_D_SENSE;
524 static int scsi_debug_every_nth = DEF_EVERY_NTH;
525 static int scsi_debug_fake_rw = DEF_FAKE_RW;
526 static unsigned int scsi_debug_guard = DEF_GUARD;
527 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
528 static int scsi_debug_max_luns = DEF_MAX_LUNS;
529 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
530 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
531 static int scsi_debug_ndelay = DEF_NDELAY;
532 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
533 static int scsi_debug_no_uld = 0;
534 static int scsi_debug_num_parts = DEF_NUM_PARTS;
535 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
536 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
537 static int scsi_debug_opts = DEF_OPTS;
538 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
539 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
540 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
541 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
542 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
543 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
544 static unsigned int scsi_debug_lbpu = DEF_LBPU;
545 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
546 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
547 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
548 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
549 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
550 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
551 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
552 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
553 static bool scsi_debug_removable = DEF_REMOVABLE;
554 static bool scsi_debug_clustering;
555 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
556 static bool scsi_debug_strict = DEF_STRICT;
557 static bool sdebug_any_injecting_opt;
558
559 static atomic_t sdebug_cmnd_count;
560 static atomic_t sdebug_completions;
561 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
562
563 #define DEV_READONLY(TGT)      (0)
564
565 static unsigned int sdebug_store_sectors;
566 static sector_t sdebug_capacity;        /* in sectors */
567
568 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
569    may still need them */
570 static int sdebug_heads;                /* heads per disk */
571 static int sdebug_cylinders_per;        /* cylinders per surface */
572 static int sdebug_sectors_per;          /* sectors per cylinder */
573
574 #define SDEBUG_MAX_PARTS 4
575
576 #define SCSI_DEBUG_MAX_CMD_LEN 32
577
578 static unsigned int scsi_debug_lbp(void)
579 {
580         return ((0 == scsi_debug_fake_rw) &&
581                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
582 }
583
584 struct sdebug_dev_info {
585         struct list_head dev_list;
586         unsigned int channel;
587         unsigned int target;
588         u64 lun;
589         struct sdebug_host_info *sdbg_host;
590         unsigned long uas_bm[1];
591         atomic_t num_in_q;
592         char stopped;           /* TODO: should be atomic */
593         bool used;
594 };
595
596 struct sdebug_host_info {
597         struct list_head host_list;
598         struct Scsi_Host *shost;
599         struct device dev;
600         struct list_head dev_info_list;
601 };
602
603 #define to_sdebug_host(d)       \
604         container_of(d, struct sdebug_host_info, dev)
605
606 static LIST_HEAD(sdebug_host_list);
607 static DEFINE_SPINLOCK(sdebug_host_list_lock);
608
609
610 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
611         struct hrtimer hrt;     /* must be first element */
612         int qa_indx;
613 };
614
615 struct sdebug_queued_cmd {
616         /* in_use flagged by a bit in queued_in_use_bm[] */
617         struct timer_list *cmnd_timerp;
618         struct tasklet_struct *tletp;
619         struct sdebug_hrtimer *sd_hrtp;
620         struct scsi_cmnd * a_cmnd;
621 };
622 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
623 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
624
625
626 static unsigned char * fake_storep;     /* ramdisk storage */
627 static struct sd_dif_tuple *dif_storep; /* protection info */
628 static void *map_storep;                /* provisioning map */
629
630 static unsigned long map_size;
631 static int num_aborts;
632 static int num_dev_resets;
633 static int num_target_resets;
634 static int num_bus_resets;
635 static int num_host_resets;
636 static int dix_writes;
637 static int dix_reads;
638 static int dif_errors;
639
640 static DEFINE_SPINLOCK(queued_arr_lock);
641 static DEFINE_RWLOCK(atomic_rw);
642
643 static char sdebug_proc_name[] = MY_NAME;
644 static const char *my_name = MY_NAME;
645
646 static struct bus_type pseudo_lld_bus;
647
648 static struct device_driver sdebug_driverfs_driver = {
649         .name           = sdebug_proc_name,
650         .bus            = &pseudo_lld_bus,
651 };
652
653 static const int check_condition_result =
654                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
655
656 static const int illegal_condition_result =
657         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
658
659 static const int device_qfull_result =
660         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
661
662 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
663                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
664                                      0, 0, 0, 0};
665 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
666                                     0, 0, 0x2, 0x4b};
667 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
668                                    0, 0, 0x0, 0x0};
669
670 static void *fake_store(unsigned long long lba)
671 {
672         lba = do_div(lba, sdebug_store_sectors);
673
674         return fake_storep + lba * scsi_debug_sector_size;
675 }
676
677 static struct sd_dif_tuple *dif_store(sector_t sector)
678 {
679         sector = do_div(sector, sdebug_store_sectors);
680
681         return dif_storep + sector;
682 }
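/*
 * Descriptive note: both helpers reduce their argument modulo
 * sdebug_store_sectors, so LBAs that differ by a multiple of the store size
 * share the same backing bytes. This is how a small ramdisk (dev_size_mb)
 * can sit behind a much larger advertised capacity when virtual_gb is set.
 */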
683
684 static int sdebug_add_adapter(void);
685 static void sdebug_remove_adapter(void);
686
687 static void sdebug_max_tgts_luns(void)
688 {
689         struct sdebug_host_info *sdbg_host;
690         struct Scsi_Host *hpnt;
691
692         spin_lock(&sdebug_host_list_lock);
693         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
694                 hpnt = sdbg_host->shost;
695                 if ((hpnt->this_id >= 0) &&
696                     (scsi_debug_num_tgts > hpnt->this_id))
697                         hpnt->max_id = scsi_debug_num_tgts + 1;
698                 else
699                         hpnt->max_id = scsi_debug_num_tgts;
700                 /* scsi_debug_max_luns; */
701                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
702         }
703         spin_unlock(&sdebug_host_list_lock);
704 }
705
706 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
707
708 /* Set in_bit to -1 to indicate no bit position of invalid field */
709 static void
710 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
711                      int in_byte, int in_bit)
712 {
713         unsigned char *sbuff;
714         u8 sks[4];
715         int sl, asc;
716
717         sbuff = scp->sense_buffer;
718         if (!sbuff) {
719                 sdev_printk(KERN_ERR, scp->device,
720                             "%s: sense_buffer is NULL\n", __func__);
721                 return;
722         }
723         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
724         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
725         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
726                                 asc, 0);
727         memset(sks, 0, sizeof(sks));
728         sks[0] = 0x80;
729         if (c_d)
730                 sks[0] |= 0x40;
731         if (in_bit >= 0) {
732                 sks[0] |= 0x8;
733                 sks[0] |= 0x7 & in_bit;
734         }
735         put_unaligned_be16(in_byte, sks + 1);
736         if (scsi_debug_dsense) {
737                 sl = sbuff[7] + 8;
738                 sbuff[7] = sl;
739                 sbuff[sl] = 0x2;
740                 sbuff[sl + 1] = 0x6;
741                 memcpy(sbuff + sl + 4, sks, 3);
742         } else
743                 memcpy(sbuff + 15, sks, 3);
744         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
745                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
746                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
747                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
748 }
749
750 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
751 {
752         unsigned char *sbuff;
753
754         sbuff = scp->sense_buffer;
755         if (!sbuff) {
756                 sdev_printk(KERN_ERR, scp->device,
757                             "%s: sense_buffer is NULL\n", __func__);
758                 return;
759         }
760         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
761
762         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
763
764         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
765                 sdev_printk(KERN_INFO, scp->device,
766                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
767                             my_name, key, asc, asq);
768 }
769
770 static void
771 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
772 {
773         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
774 }
775
776 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
777 {
778         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
779                 if (0x1261 == cmd)
780                         sdev_printk(KERN_INFO, dev,
781                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
782                 else if (0x5331 == cmd)
783                         sdev_printk(KERN_INFO, dev,
784                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
785                                     __func__);
786                 else
787                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
788                                     __func__, cmd);
789         }
790         return -EINVAL;
791         /* return -ENOTTY; // correct return but upsets fdisk */
792 }
793
794 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
795 {
796         struct sdebug_host_info *sdhp;
797         struct sdebug_dev_info *dp;
798
799         spin_lock(&sdebug_host_list_lock);
800         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
801                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
802                         if ((devip->sdbg_host == dp->sdbg_host) &&
803                             (devip->target == dp->target))
804                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
805                 }
806         }
807         spin_unlock(&sdebug_host_list_lock);
808 }
809
810 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
811                            struct sdebug_dev_info * devip)
812 {
813         int k;
814         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
815
816         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
817         if (k != SDEBUG_NUM_UAS) {
818                 const char *cp = NULL;
819
820                 switch (k) {
821                 case SDEBUG_UA_POR:
822                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
823                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
824                         if (debug)
825                                 cp = "power on reset";
826                         break;
827                 case SDEBUG_UA_BUS_RESET:
828                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
829                                         UA_RESET_ASC, BUS_RESET_ASCQ);
830                         if (debug)
831                                 cp = "bus reset";
832                         break;
833                 case SDEBUG_UA_MODE_CHANGED:
834                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
835                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
836                         if (debug)
837                                 cp = "mode parameters changed";
838                         break;
839                 case SDEBUG_UA_CAPACITY_CHANGED:
840                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
841                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
842                         if (debug)
843                                 cp = "capacity data changed";
844                         break;
845                 case SDEBUG_UA_MICROCODE_CHANGED:
846                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
847                                  TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
848                         if (debug)
849                                 cp = "microcode has been changed";
850                         break;
851                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
852                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
853                                         TARGET_CHANGED_ASC,
854                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
855                         if (debug)
856                                 cp = "microcode has been changed without reset";
857                         break;
858                 case SDEBUG_UA_LUNS_CHANGED:
859                         /*
860                          * SPC-3 behavior is to report a UNIT ATTENTION with
861                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
862                          * on the target, until a REPORT LUNS command is
863                          * received.  SPC-4 behavior is to report it only once.
864                          * NOTE:  scsi_debug_scsi_level does not use the same
865                          * values as struct scsi_device->scsi_level.
866                          */
867                         if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
868                                 clear_luns_changed_on_target(devip);
869                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
870                                         TARGET_CHANGED_ASC,
871                                         LUNS_CHANGED_ASCQ);
872                         if (debug)
873                                 cp = "reported luns data has changed";
874                         break;
875                 default:
876                         pr_warn("%s: unexpected unit attention code=%d\n",
877                                 __func__, k);
878                         if (debug)
879                                 cp = "unknown";
880                         break;
881                 }
882                 clear_bit(k, devip->uas_bm);
883                 if (debug)
884                         sdev_printk(KERN_INFO, SCpnt->device,
885                                    "%s reports: Unit attention: %s\n",
886                                    my_name, cp);
887                 return check_condition_result;
888         }
889         if ((UAS_TUR == uas_only) && devip->stopped) {
890                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
891                                 0x2);
892                 if (debug)
893                         sdev_printk(KERN_INFO, SCpnt->device,
894                                     "%s reports: Not ready: %s\n", my_name,
895                                     "initializing command required");
896                 return check_condition_result;
897         }
898         return 0;
899 }
900
901 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
902 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
903                                 int arr_len)
904 {
905         int act_len;
906         struct scsi_data_buffer *sdb = scsi_in(scp);
907
908         if (!sdb->length)
909                 return 0;
910         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
911                 return (DID_ERROR << 16);
912
913         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
914                                       arr, arr_len);
915         sdb->resid = scsi_bufflen(scp) - act_len;
916
917         return 0;
918 }
919
920 /* Returns number of bytes fetched into 'arr' or -1 if error. */
921 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
922                                int arr_len)
923 {
924         if (!scsi_bufflen(scp))
925                 return 0;
926         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
927                 return -1;
928
929         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
930 }
931
932
933 static const char * inq_vendor_id = "Linux   ";
934 static const char * inq_product_id = "scsi_debug      ";
935 static const char *inq_product_rev = "0184";    /* version less '.' */
936
937 /* Device identification VPD page. Returns number of bytes placed in arr */
938 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
939                            int target_dev_id, int dev_id_num,
940                            const char * dev_id_str,
941                            int dev_id_str_len)
942 {
943         int num, port_a;
944         char b[32];
945
946         port_a = target_dev_id + 1;
947         /* T10 vendor identifier field format (faked) */
948         arr[0] = 0x2;   /* ASCII */
949         arr[1] = 0x1;
950         arr[2] = 0x0;
951         memcpy(&arr[4], inq_vendor_id, 8);
952         memcpy(&arr[12], inq_product_id, 16);
953         memcpy(&arr[28], dev_id_str, dev_id_str_len);
954         num = 8 + 16 + dev_id_str_len;
955         arr[3] = num;
956         num += 4;
957         if (dev_id_num >= 0) {
958                 /* NAA-5, Logical unit identifier (binary) */
959                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
960                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
961                 arr[num++] = 0x0;
962                 arr[num++] = 0x8;
963                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
964                 arr[num++] = 0x33;
965                 arr[num++] = 0x33;
966                 arr[num++] = 0x30;
967                 arr[num++] = (dev_id_num >> 24);
968                 arr[num++] = (dev_id_num >> 16) & 0xff;
969                 arr[num++] = (dev_id_num >> 8) & 0xff;
970                 arr[num++] = dev_id_num & 0xff;
971                 /* Target relative port number */
972                 arr[num++] = 0x61;      /* proto=sas, binary */
973                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
974                 arr[num++] = 0x0;       /* reserved */
975                 arr[num++] = 0x4;       /* length */
976                 arr[num++] = 0x0;       /* reserved */
977                 arr[num++] = 0x0;       /* reserved */
978                 arr[num++] = 0x0;
979                 arr[num++] = 0x1;       /* relative port A */
980         }
981         /* NAA-5, Target port identifier */
982         arr[num++] = 0x61;      /* proto=sas, binary */
983         arr[num++] = 0x93;      /* piv=1, target port, naa */
984         arr[num++] = 0x0;
985         arr[num++] = 0x8;
986         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
987         arr[num++] = 0x22;
988         arr[num++] = 0x22;
989         arr[num++] = 0x20;
990         arr[num++] = (port_a >> 24);
991         arr[num++] = (port_a >> 16) & 0xff;
992         arr[num++] = (port_a >> 8) & 0xff;
993         arr[num++] = port_a & 0xff;
994         /* NAA-5, Target port group identifier */
995         arr[num++] = 0x61;      /* proto=sas, binary */
996         arr[num++] = 0x95;      /* piv=1, target port group id */
997         arr[num++] = 0x0;
998         arr[num++] = 0x4;
999         arr[num++] = 0;
1000         arr[num++] = 0;
1001         arr[num++] = (port_group_id >> 8) & 0xff;
1002         arr[num++] = port_group_id & 0xff;
1003         /* NAA-5, Target device identifier */
1004         arr[num++] = 0x61;      /* proto=sas, binary */
1005         arr[num++] = 0xa3;      /* piv=1, target device, naa */
1006         arr[num++] = 0x0;
1007         arr[num++] = 0x8;
1008         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
1009         arr[num++] = 0x22;
1010         arr[num++] = 0x22;
1011         arr[num++] = 0x20;
1012         arr[num++] = (target_dev_id >> 24);
1013         arr[num++] = (target_dev_id >> 16) & 0xff;
1014         arr[num++] = (target_dev_id >> 8) & 0xff;
1015         arr[num++] = target_dev_id & 0xff;
1016         /* SCSI name string: Target device identifier */
1017         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1018         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1019         arr[num++] = 0x0;
1020         arr[num++] = 24;
1021         memcpy(arr + num, "naa.52222220", 12);
1022         num += 12;
1023         snprintf(b, sizeof(b), "%08X", target_dev_id);
1024         memcpy(arr + num, b, 8);
1025         num += 8;
1026         memset(arr + num, 0, 4);
1027         num += 4;
1028         return num;
1029 }
1030
1031
1032 static unsigned char vpd84_data[] = {
1033 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1034     0x22,0x22,0x22,0x0,0xbb,0x1,
1035     0x22,0x22,0x22,0x0,0xbb,0x2,
1036 };
1037
1038 /*  Software interface identification VPD page */
1039 static int inquiry_evpd_84(unsigned char * arr)
1040 {
1041         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1042         return sizeof(vpd84_data);
1043 }
1044
1045 /* Management network addresses VPD page */
1046 static int inquiry_evpd_85(unsigned char * arr)
1047 {
1048         int num = 0;
1049         const char * na1 = "https://www.kernel.org/config";
1050         const char * na2 = "http://www.kernel.org/log";
1051         int plen, olen;
1052
1053         arr[num++] = 0x1;       /* lu, storage config */
1054         arr[num++] = 0x0;       /* reserved */
1055         arr[num++] = 0x0;
1056         olen = strlen(na1);
1057         plen = olen + 1;
1058         if (plen % 4)
1059                 plen = ((plen / 4) + 1) * 4;
1060         arr[num++] = plen;      /* length, null terminated, padded */
1061         memcpy(arr + num, na1, olen);
1062         memset(arr + num + olen, 0, plen - olen);
1063         num += plen;
1064
1065         arr[num++] = 0x4;       /* lu, logging */
1066         arr[num++] = 0x0;       /* reserved */
1067         arr[num++] = 0x0;
1068         olen = strlen(na2);
1069         plen = olen + 1;
1070         if (plen % 4)
1071                 plen = ((plen / 4) + 1) * 4;
1072         arr[num++] = plen;      /* length, null terminated, padded */
1073         memcpy(arr + num, na2, olen);
1074         memset(arr + num + olen, 0, plen - olen);
1075         num += plen;
1076
1077         return num;
1078 }
1079
1080 /* SCSI ports VPD page */
1081 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1082 {
1083         int num = 0;
1084         int port_a, port_b;
1085
1086         port_a = target_dev_id + 1;
1087         port_b = port_a + 1;
1088         arr[num++] = 0x0;       /* reserved */
1089         arr[num++] = 0x0;       /* reserved */
1090         arr[num++] = 0x0;
1091         arr[num++] = 0x1;       /* relative port 1 (primary) */
1092         memset(arr + num, 0, 6);
1093         num += 6;
1094         arr[num++] = 0x0;
1095         arr[num++] = 12;        /* length tp descriptor */
1096         /* naa-5 target port identifier (A) */
1097         arr[num++] = 0x61;      /* proto=sas, binary */
1098         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1099         arr[num++] = 0x0;       /* reserved */
1100         arr[num++] = 0x8;       /* length */
1101         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1102         arr[num++] = 0x22;
1103         arr[num++] = 0x22;
1104         arr[num++] = 0x20;
1105         arr[num++] = (port_a >> 24);
1106         arr[num++] = (port_a >> 16) & 0xff;
1107         arr[num++] = (port_a >> 8) & 0xff;
1108         arr[num++] = port_a & 0xff;
1109
1110         arr[num++] = 0x0;       /* reserved */
1111         arr[num++] = 0x0;       /* reserved */
1112         arr[num++] = 0x0;
1113         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1114         memset(arr + num, 0, 6);
1115         num += 6;
1116         arr[num++] = 0x0;
1117         arr[num++] = 12;        /* length tp descriptor */
1118         /* naa-5 target port identifier (B) */
1119         arr[num++] = 0x61;      /* proto=sas, binary */
1120         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1121         arr[num++] = 0x0;       /* reserved */
1122         arr[num++] = 0x8;       /* length */
1123         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1124         arr[num++] = 0x22;
1125         arr[num++] = 0x22;
1126         arr[num++] = 0x20;
1127         arr[num++] = (port_b >> 24);
1128         arr[num++] = (port_b >> 16) & 0xff;
1129         arr[num++] = (port_b >> 8) & 0xff;
1130         arr[num++] = port_b & 0xff;
1131
1132         return num;
1133 }
1134
1135
1136 static unsigned char vpd89_data[] = {
1137 /* from 4th byte */ 0,0,0,0,
1138 'l','i','n','u','x',' ',' ',' ',
1139 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1140 '1','2','3','4',
1141 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1142 0xec,0,0,0,
1143 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1144 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1145 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1146 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1147 0x53,0x41,
1148 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1149 0x20,0x20,
1150 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1151 0x10,0x80,
1152 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1153 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1154 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1155 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1156 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1157 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1158 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1159 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1160 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1162 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1163 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1164 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1165 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1166 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1167 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1168 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1169 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1170 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1171 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1172 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1173 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1174 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1175 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1176 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1177 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1178 };
1179
1180 /* ATA Information VPD page */
1181 static int inquiry_evpd_89(unsigned char * arr)
1182 {
1183         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1184         return sizeof(vpd89_data);
1185 }
1186
1187
1188 static unsigned char vpdb0_data[] = {
1189         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1190         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1191         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1192         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1193 };
1194
1195 /* Block limits VPD page (SBC-3) */
1196 static int inquiry_evpd_b0(unsigned char * arr)
1197 {
1198         unsigned int gran;
1199
1200         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1201
1202         /* Optimal transfer length granularity */
1203         gran = 1 << scsi_debug_physblk_exp;
1204         arr[2] = (gran >> 8) & 0xff;
1205         arr[3] = gran & 0xff;
1206
1207         /* Maximum Transfer Length */
1208         if (sdebug_store_sectors > 0x400) {
1209                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1210                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1211                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1212                 arr[7] = sdebug_store_sectors & 0xff;
1213         }
1214
1215         /* Optimal Transfer Length */
1216         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1217
1218         if (scsi_debug_lbpu) {
1219                 /* Maximum Unmap LBA Count */
1220                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1221
1222                 /* Maximum Unmap Block Descriptor Count */
1223                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1224         }
1225
1226         /* Unmap Granularity Alignment */
1227         if (scsi_debug_unmap_alignment) {
1228                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1229                 arr[28] |= 0x80; /* UGAVALID */
1230         }
1231
1232         /* Optimal Unmap Granularity */
1233         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1234
1235         /* Maximum WRITE SAME Length */
1236         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1237
1238         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1241 }
1242
1243 /* Block device characteristics VPD page (SBC-3) */
1244 static int inquiry_evpd_b1(unsigned char *arr)
1245 {
1246         memset(arr, 0, 0x3c);
1247         arr[0] = 0;
1248         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1249         arr[2] = 0;
1250         arr[3] = 5;     /* less than 1.8" */
1251
1252         return 0x3c;
1253 }
1254
1255 /* Logical block provisioning VPD page (SBC-3) */
1256 static int inquiry_evpd_b2(unsigned char *arr)
1257 {
1258         memset(arr, 0, 0x4);
1259         arr[0] = 0;                     /* threshold exponent */
1260
1261         if (scsi_debug_lbpu)
1262                 arr[1] = 1 << 7;
1263
1264         if (scsi_debug_lbpws)
1265                 arr[1] |= 1 << 6;
1266
1267         if (scsi_debug_lbpws10)
1268                 arr[1] |= 1 << 5;
1269
1270         if (scsi_debug_lbprz)
1271                 arr[1] |= 1 << 2;
1272
1273         return 0x4;
1274 }
1275
1276 #define SDEBUG_LONG_INQ_SZ 96
1277 #define SDEBUG_MAX_INQ_ARR_SZ 584
1278
1279 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1280 {
1281         unsigned char pq_pdt;
1282         unsigned char * arr;
1283         unsigned char *cmd = scp->cmnd;
1284         int alloc_len, n, ret;
1285         bool have_wlun;
1286
1287         alloc_len = (cmd[3] << 8) + cmd[4];
1288         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1289         if (! arr)
1290                 return DID_REQUEUE << 16;
1291         have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1292         if (have_wlun)
1293                 pq_pdt = 0x1e;  /* present, wlun */
1294         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1295                 pq_pdt = 0x7f;  /* not present, no device type */
1296         else
1297                 pq_pdt = (scsi_debug_ptype & 0x1f);
1298         arr[0] = pq_pdt;
1299         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1300                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1301                 kfree(arr);
1302                 return check_condition_result;
1303         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1304                 int lu_id_num, port_group_id, target_dev_id, len;
1305                 char lu_id_str[6];
1306                 int host_no = devip->sdbg_host->shost->host_no;
1307                 
1308                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1309                     (devip->channel & 0x7f);
1310                 if (0 == scsi_debug_vpd_use_hostno)
1311                         host_no = 0;
1312                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1313                             (devip->target * 1000) + devip->lun);
1314                 target_dev_id = ((host_no + 1) * 2000) +
1315                                  (devip->target * 1000) - 3;
1316                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1317                 if (0 == cmd[2]) { /* supported vital product data pages */
1318                         arr[1] = cmd[2];        /*sanity */
1319                         n = 4;
1320                         arr[n++] = 0x0;   /* this page */
1321                         arr[n++] = 0x80;  /* unit serial number */
1322                         arr[n++] = 0x83;  /* device identification */
1323                         arr[n++] = 0x84;  /* software interface ident. */
1324                         arr[n++] = 0x85;  /* management network addresses */
1325                         arr[n++] = 0x86;  /* extended inquiry */
1326                         arr[n++] = 0x87;  /* mode page policy */
1327                         arr[n++] = 0x88;  /* SCSI ports */
1328                         arr[n++] = 0x89;  /* ATA information */
1329                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1330                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1331                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1332                                 arr[n++] = 0xb2;
1333                         arr[3] = n - 4;   /* number of supported VPD pages */
1334                 } else if (0x80 == cmd[2]) { /* unit serial number */
1335                         arr[1] = cmd[2];        /*sanity */
1336                         arr[3] = len;
1337                         memcpy(&arr[4], lu_id_str, len);
1338                 } else if (0x83 == cmd[2]) { /* device identification */
1339                         arr[1] = cmd[2];        /*sanity */
1340                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1341                                                  target_dev_id, lu_id_num,
1342                                                  lu_id_str, len);
1343                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1344                         arr[1] = cmd[2];        /*sanity */
1345                         arr[3] = inquiry_evpd_84(&arr[4]);
1346                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1347                         arr[1] = cmd[2];        /*sanity */
1348                         arr[3] = inquiry_evpd_85(&arr[4]);
1349                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1350                         arr[1] = cmd[2];        /*sanity */
1351                         arr[3] = 0x3c;  /* number of following entries */
1352                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1353                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1354                         else if (scsi_debug_dif)
1355                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1356                         else
1357                                 arr[4] = 0x0;   /* no protection stuff */
1358                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1359                 } else if (0x87 == cmd[2]) { /* mode page policy */
1360                         arr[1] = cmd[2];        /*sanity */
1361                         arr[3] = 0x8;   /* number of following entries */
1362                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1363                         arr[6] = 0x80;  /* mlus, shared */
1364                         arr[8] = 0x18;   /* protocol specific lu */
1365                         arr[10] = 0x82;  /* mlus, per initiator port */
1366                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1367                         arr[1] = cmd[2];        /*sanity */
1368                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1369                 } else if (0x89 == cmd[2]) { /* ATA information */
1370                         arr[1] = cmd[2];        /*sanity */
1371                         n = inquiry_evpd_89(&arr[4]);
1372                         arr[2] = (n >> 8);
1373                         arr[3] = (n & 0xff);
1374                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1375                         arr[1] = cmd[2];        /*sanity */
1376                         arr[3] = inquiry_evpd_b0(&arr[4]);
1377                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1378                         arr[1] = cmd[2];        /*sanity */
1379                         arr[3] = inquiry_evpd_b1(&arr[4]);
1380                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1381                         arr[1] = cmd[2];        /*sanity */
1382                         arr[3] = inquiry_evpd_b2(&arr[4]);
1383                 } else {
1384                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1385                         kfree(arr);
1386                         return check_condition_result;
1387                 }
1388                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1389                 ret = fill_from_dev_buffer(scp, arr,
1390                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1391                 kfree(arr);
1392                 return ret;
1393         }
1394         /* drops through here for a standard inquiry */
1395         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1396         arr[2] = scsi_debug_scsi_level;
1397         arr[3] = 2;    /* response_data_format==2 */
1398         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1399         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1400         if (0 == scsi_debug_vpd_use_hostno)
1401                 arr[5] |= 0x10; /* claim: implicit TPGS */
1402         arr[6] = 0x10; /* claim: MultiP */
1403         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1404         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1405         memcpy(&arr[8], inq_vendor_id, 8);
1406         memcpy(&arr[16], inq_product_id, 16);
1407         memcpy(&arr[32], inq_product_rev, 4);
1408         /* version descriptors (2 bytes each) follow */
1409         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1410         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1411         n = 62;
1412         if (scsi_debug_ptype == 0) {
1413                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1414         } else if (scsi_debug_ptype == 1) {
1415                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1416         }
1417         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1418         ret = fill_from_dev_buffer(scp, arr,
1419                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1420         kfree(arr);
1421         return ret;
1422 }
1423
1424 static int resp_requests(struct scsi_cmnd * scp,
1425                          struct sdebug_dev_info * devip)
1426 {
1427         unsigned char * sbuff;
1428         unsigned char *cmd = scp->cmnd;
1429         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1430         bool dsense, want_dsense;
1431         int len = 18;
1432
1433         memset(arr, 0, sizeof(arr));
1434         dsense = !!(cmd[1] & 1);
1435         want_dsense = dsense || scsi_debug_dsense;
1436         sbuff = scp->sense_buffer;
1437         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1438                 if (dsense) {
1439                         arr[0] = 0x72;
1440                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1441                         arr[2] = THRESHOLD_EXCEEDED;
1442                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1443                         len = 8;
1444                 } else {
1445                         arr[0] = 0x70;
1446                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1447                         arr[7] = 0xa;           /* 18 byte sense buffer */
1448                         arr[12] = THRESHOLD_EXCEEDED;
1449                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1450                 }
1451         } else {
1452                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1453                 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1454                         ;       /* have sense and formats match */
1455                 else if (arr[0] <= 0x70) {
1456                         if (dsense) {
1457                                 memset(arr, 0, 8);
1458                                 arr[0] = 0x72;
1459                                 len = 8;
1460                         } else {
1461                                 memset(arr, 0, 18);
1462                                 arr[0] = 0x70;
1463                                 arr[7] = 0xa;
1464                         }
1465                 } else if (dsense) {
1466                         memset(arr, 0, 8);
1467                         arr[0] = 0x72;
1468                         arr[1] = sbuff[2];     /* sense key */
1469                         arr[2] = sbuff[12];    /* asc */
1470                         arr[3] = sbuff[13];    /* ascq */
1471                         len = 8;
1472                 } else {
1473                         memset(arr, 0, 18);
1474                         arr[0] = 0x70;
1475                         arr[2] = sbuff[1];
1476                         arr[7] = 0xa;
1477                         arr[12] = sbuff[2];    /* asc */
1478                         arr[13] = sbuff[3];
1479                 }
1480
1481         }
1482         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1483         return fill_from_dev_buffer(scp, arr, len);
1484 }
1485
1486 static int resp_start_stop(struct scsi_cmnd * scp,
1487                            struct sdebug_dev_info * devip)
1488 {
1489         unsigned char *cmd = scp->cmnd;
1490         int power_cond, start;
1491
1492         power_cond = (cmd[4] & 0xf0) >> 4;
1493         if (power_cond) {
1494                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1495                 return check_condition_result;
1496         }
1497         start = cmd[4] & 1;
1498         if (start == devip->stopped)
1499                 devip->stopped = !start;
1500         return 0;
1501 }
1502
1503 static sector_t get_sdebug_capacity(void)
1504 {
1505         if (scsi_debug_virtual_gb > 0)
1506                 return (sector_t)scsi_debug_virtual_gb *
1507                         (1073741824 / scsi_debug_sector_size);
1508         else
1509                 return sdebug_store_sectors;
1510 }
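
/*
 * Editor's note (worked example, not driver code): with module parameters
 * giving scsi_debug_virtual_gb=4 and scsi_debug_sector_size=512, the
 * reported capacity is 4 * (1073741824 / 512) = 8388608 sectors (4 GiB),
 * even if the backing RAM store is smaller; accesses then wrap modulo
 * sdebug_store_sectors in do_device_access() further below.
 */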
1511
1512 #define SDEBUG_READCAP_ARR_SZ 8
1513 static int resp_readcap(struct scsi_cmnd * scp,
1514                         struct sdebug_dev_info * devip)
1515 {
1516         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1517         unsigned int capac;
1518
1519         /* following just in case virtual_gb changed */
1520         sdebug_capacity = get_sdebug_capacity();
1521         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1522         if (sdebug_capacity < 0xffffffff) {
1523                 capac = (unsigned int)sdebug_capacity - 1;
1524                 arr[0] = (capac >> 24);
1525                 arr[1] = (capac >> 16) & 0xff;
1526                 arr[2] = (capac >> 8) & 0xff;
1527                 arr[3] = capac & 0xff;
1528         } else {
1529                 arr[0] = 0xff;
1530                 arr[1] = 0xff;
1531                 arr[2] = 0xff;
1532                 arr[3] = 0xff;
1533         }
1534         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1535         arr[7] = scsi_debug_sector_size & 0xff;
1536         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1537 }
1538
1539 #define SDEBUG_READCAP16_ARR_SZ 32
1540 static int resp_readcap16(struct scsi_cmnd * scp,
1541                           struct sdebug_dev_info * devip)
1542 {
1543         unsigned char *cmd = scp->cmnd;
1544         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1545         unsigned long long capac;
1546         int k, alloc_len;
1547
1548         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1549                      + cmd[13]);
1550         /* following just in case virtual_gb changed */
1551         sdebug_capacity = get_sdebug_capacity();
1552         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1553         capac = sdebug_capacity - 1;
1554         for (k = 0; k < 8; ++k, capac >>= 8)
1555                 arr[7 - k] = capac & 0xff;
1556         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1557         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1558         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1559         arr[11] = scsi_debug_sector_size & 0xff;
1560         arr[13] = scsi_debug_physblk_exp & 0xf;
1561         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1562
1563         if (scsi_debug_lbp()) {
1564                 arr[14] |= 0x80; /* LBPME */
1565                 if (scsi_debug_lbprz)
1566                         arr[14] |= 0x40; /* LBPRZ */
1567         }
1568
1569         arr[15] = scsi_debug_lowest_aligned & 0xff;
1570
1571         if (scsi_debug_dif) {
1572                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1573                 arr[12] |= 1; /* PROT_EN */
1574         }
1575
1576         return fill_from_dev_buffer(scp, arr,
1577                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1578 }
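
/*
 * Editor's illustrative sketch (not part of the driver): decoding the
 * READ CAPACITY(16) parameter data that resp_readcap16() builds above.
 * The helper name is made up for this example; offsets follow the byte
 * positions written by the code.
 */
static void __maybe_unused
sdeb_example_decode_readcap16(const unsigned char *p)
{
        u64 last_lba = get_unaligned_be64(p);           /* bytes 0-7 */
        u32 lb_size = get_unaligned_be32(p + 8);        /* bytes 8-11 */
        unsigned int prot_en = p[12] & 0x1;
        unsigned int p_type = (p[12] >> 1) & 0x7;       /* 0 -> DIF type 1 */
        unsigned int lbppbe = p[13] & 0xf;  /* logical blocks per physical exp */
        unsigned int lbpme = !!(p[14] & 0x80);  /* provisioning enabled */
        unsigned int lbprz = !!(p[14] & 0x40);  /* unmapped blocks read as 0 */
        unsigned int lowest_aligned = get_unaligned_be16(p + 14) & 0x3fff;

        pr_debug("last lba %llu, block size %u, prot_en %u, p_type %u, "
                 "lbppbe %u, lbpme %u, lbprz %u, lowest aligned %u\n",
                 (unsigned long long)last_lba, lb_size, prot_en, p_type,
                 lbppbe, lbpme, lbprz, lowest_aligned);
}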
1579
1580 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1581
1582 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1583                               struct sdebug_dev_info * devip)
1584 {
1585         unsigned char *cmd = scp->cmnd;
1586         unsigned char * arr;
1587         int host_no = devip->sdbg_host->shost->host_no;
1588         int n, ret, alen, rlen;
1589         int port_group_a, port_group_b, port_a, port_b;
1590
1591         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1592                 + cmd[9]);
1593
1594         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1595         if (! arr)
1596                 return DID_REQUEUE << 16;
1597         /*
1598          * EVPD page 0x88 states we have two ports, one
1599          * real and a fake port with no device connected.
1600          * So we create two port groups with one port each
1601          * and set the group with port B to unavailable.
1602          */
1603         port_a = 0x1; /* relative port A */
1604         port_b = 0x2; /* relative port B */
1605         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1606             (devip->channel & 0x7f);
1607         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1608             (devip->channel & 0x7f) + 0x80;
1609
1610         /*
1611          * The asymmetric access state is cycled according to host_no.
1612          */
1613         n = 4;
1614         if (0 == scsi_debug_vpd_use_hostno) {
1615             arr[n++] = host_no % 3; /* Asymm access state */
1616             arr[n++] = 0x0F; /* claim: all states are supported */
1617         } else {
1618             arr[n++] = 0x0; /* Active/Optimized path */
1619             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1620         }
1621         arr[n++] = (port_group_a >> 8) & 0xff;
1622         arr[n++] = port_group_a & 0xff;
1623         arr[n++] = 0;    /* Reserved */
1624         arr[n++] = 0;    /* Status code */
1625         arr[n++] = 0;    /* Vendor unique */
1626         arr[n++] = 0x1;  /* One port per group */
1627         arr[n++] = 0;    /* Reserved */
1628         arr[n++] = 0;    /* Reserved */
1629         arr[n++] = (port_a >> 8) & 0xff;
1630         arr[n++] = port_a & 0xff;
1631         arr[n++] = 3;    /* Port unavailable */
1632         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1633         arr[n++] = (port_group_b >> 8) & 0xff;
1634         arr[n++] = port_group_b & 0xff;
1635         arr[n++] = 0;    /* Reserved */
1636         arr[n++] = 0;    /* Status code */
1637         arr[n++] = 0;    /* Vendor unique */
1638         arr[n++] = 0x1;  /* One port per group */
1639         arr[n++] = 0;    /* Reserved */
1640         arr[n++] = 0;    /* Reserved */
1641         arr[n++] = (port_b >> 8) & 0xff;
1642         arr[n++] = port_b & 0xff;
1643
1644         rlen = n - 4;
1645         arr[0] = (rlen >> 24) & 0xff;
1646         arr[1] = (rlen >> 16) & 0xff;
1647         arr[2] = (rlen >> 8) & 0xff;
1648         arr[3] = rlen & 0xff;
1649
1650         /*
1651          * Return the smallest value of either
1652          * - The allocated length
1653          * - The constructed command length
1654          * - The maximum array size
1655          */
1656         rlen = min(alen,n);
1657         ret = fill_from_dev_buffer(scp, arr,
1658                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1659         kfree(arr);
1660         return ret;
1661 }
1662
1663 static int
1664 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1665 {
1666         bool rctd;
1667         u8 reporting_opts, req_opcode, sdeb_i, supp;
1668         u16 req_sa, u;
1669         u32 alloc_len, a_len;
1670         int k, offset, len, errsts, count, bump, na;
1671         const struct opcode_info_t *oip;
1672         const struct opcode_info_t *r_oip;
1673         u8 *arr;
1674         u8 *cmd = scp->cmnd;
1675
1676         rctd = !!(cmd[2] & 0x80);
1677         reporting_opts = cmd[2] & 0x7;
1678         req_opcode = cmd[3];
1679         req_sa = get_unaligned_be16(cmd + 4);
1680         alloc_len = get_unaligned_be32(cmd + 6);
1681         if (alloc_len < 4 || alloc_len > 0xffff) {
1682                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1683                 return check_condition_result;
1684         }
1685         if (alloc_len > 8192)
1686                 a_len = 8192;
1687         else
1688                 a_len = alloc_len;
1689         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1690         if (NULL == arr) {
1691                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1692                                 INSUFF_RES_ASCQ);
1693                 return check_condition_result;
1694         }
1695         switch (reporting_opts) {
1696         case 0: /* all commands */
1697                 /* count number of commands */
1698                 for (count = 0, oip = opcode_info_arr;
1699                      oip->num_attached != 0xff; ++oip) {
1700                         if (F_INV_OP & oip->flags)
1701                                 continue;
1702                         count += (oip->num_attached + 1);
1703                 }
1704                 bump = rctd ? 20 : 8;
1705                 put_unaligned_be32(count * bump, arr);
1706                 for (offset = 4, oip = opcode_info_arr;
1707                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1708                         if (F_INV_OP & oip->flags)
1709                                 continue;
1710                         na = oip->num_attached;
1711                         arr[offset] = oip->opcode;
1712                         put_unaligned_be16(oip->sa, arr + offset + 2);
1713                         if (rctd)
1714                                 arr[offset + 5] |= 0x2;
1715                         if (FF_SA & oip->flags)
1716                                 arr[offset + 5] |= 0x1;
1717                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1718                         if (rctd)
1719                                 put_unaligned_be16(0xa, arr + offset + 8);
1720                         r_oip = oip;
1721                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1722                                 if (F_INV_OP & oip->flags)
1723                                         continue;
1724                                 offset += bump;
1725                                 arr[offset] = oip->opcode;
1726                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1727                                 if (rctd)
1728                                         arr[offset + 5] |= 0x2;
1729                                 if (FF_SA & oip->flags)
1730                                         arr[offset + 5] |= 0x1;
1731                                 put_unaligned_be16(oip->len_mask[0],
1732                                                    arr + offset + 6);
1733                                 if (rctd)
1734                                         put_unaligned_be16(0xa,
1735                                                            arr + offset + 8);
1736                         }
1737                         oip = r_oip;
1738                         offset += bump;
1739                 }
1740                 break;
1741         case 1: /* one command: opcode only */
1742         case 2: /* one command: opcode plus service action */
1743         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1744                 sdeb_i = opcode_ind_arr[req_opcode];
1745                 oip = &opcode_info_arr[sdeb_i];
1746                 if (F_INV_OP & oip->flags) {
1747                         supp = 1;
1748                         offset = 4;
1749                 } else {
1750                         if (1 == reporting_opts) {
1751                                 if (FF_SA & oip->flags) {
1752                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1753                                                              2, 2);
1754                                         kfree(arr);
1755                                         return check_condition_result;
1756                                 }
1757                                 req_sa = 0;
1758                         } else if (2 == reporting_opts &&
1759                                    0 == (FF_SA & oip->flags)) {
1760                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1761                                 kfree(arr);     /* point at requested sa */
1762                                 return check_condition_result;
1763                         }
1764                         if (0 == (FF_SA & oip->flags) &&
1765                             req_opcode == oip->opcode)
1766                                 supp = 3;
1767                         else if (0 == (FF_SA & oip->flags)) {
1768                                 na = oip->num_attached;
1769                                 for (k = 0, oip = oip->arrp; k < na;
1770                                      ++k, ++oip) {
1771                                         if (req_opcode == oip->opcode)
1772                                                 break;
1773                                 }
1774                                 supp = (k >= na) ? 1 : 3;
1775                         } else if (req_sa != oip->sa) {
1776                                 na = oip->num_attached;
1777                                 for (k = 0, oip = oip->arrp; k < na;
1778                                      ++k, ++oip) {
1779                                         if (req_sa == oip->sa)
1780                                                 break;
1781                                 }
1782                                 supp = (k >= na) ? 1 : 3;
1783                         } else
1784                                 supp = 3;
1785                         if (3 == supp) {
1786                                 u = oip->len_mask[0];
1787                                 put_unaligned_be16(u, arr + 2);
1788                                 arr[4] = oip->opcode;
1789                                 for (k = 1; k < u; ++k)
1790                                         arr[4 + k] = (k < 16) ?
1791                                                  oip->len_mask[k] : 0xff;
1792                                 offset = 4 + u;
1793                         } else
1794                                 offset = 4;
1795                 }
1796                 arr[1] = (rctd ? 0x80 : 0) | supp;
1797                 if (rctd) {
1798                         put_unaligned_be16(0xa, arr + offset);
1799                         offset += 12;
1800                 }
1801                 break;
1802         default:
1803                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1804                 kfree(arr);
1805                 return check_condition_result;
1806         }
1807         offset = (offset < a_len) ? offset : a_len;
1808         len = (offset < alloc_len) ? offset : alloc_len;
1809         errsts = fill_from_dev_buffer(scp, arr, len);
1810         kfree(arr);
1811         return errsts;
1812 }
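
/*
 * Editor's illustrative sketch (not part of the driver): shape of one
 * "all commands" descriptor emitted by resp_rsup_opcodes() above. Each
 * descriptor is 8 bytes, growing to 20 when RCTD is set and the command
 * timeouts descriptor (length field 0x0a) is appended. The struct name
 * is made up for this example.
 */
struct sdeb_example_rsoc_desc {
        u8 opcode;
        u8 reserved1;
        __be16 service_action;
        u8 reserved2;
        u8 flags;               /* bit 1: CTDP, bit 0: SERVACTV */
        __be16 cdb_length;
        u8 timeouts_desc[12];   /* only present when CTDP is set */
} __packed;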
1813
1814 static int
1815 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1816 {
1817         bool repd;
1818         u32 alloc_len, len;
1819         u8 arr[16];
1820         u8 *cmd = scp->cmnd;
1821
1822         memset(arr, 0, sizeof(arr));
1823         repd = !!(cmd[2] & 0x80);
1824         alloc_len = get_unaligned_be32(cmd + 6);
1825         if (alloc_len < 4) {
1826                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1827                 return check_condition_result;
1828         }
1829         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1830         arr[1] = 0x1;           /* ITNRS */
1831         if (repd) {
1832                 arr[3] = 0xc;
1833                 len = 16;
1834         } else
1835                 len = 4;
1836
1837         len = (len < alloc_len) ? len : alloc_len;
1838         return fill_from_dev_buffer(scp, arr, len);
1839 }
1840
1841 /* <<Following mode page info copied from ST318451LW>> */
1842
1843 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1844 {       /* Read-Write Error Recovery page for mode_sense */
1845         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1846                                         5, 0, 0xff, 0xff};
1847
1848         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1849         if (1 == pcontrol)
1850                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1851         return sizeof(err_recov_pg);
1852 }
1853
1854 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1855 {       /* Disconnect-Reconnect page for mode_sense */
1856         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1857                                          0, 0, 0, 0, 0, 0, 0, 0};
1858
1859         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1860         if (1 == pcontrol)
1861                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1862         return sizeof(disconnect_pg);
1863 }
1864
1865 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1866 {       /* Format device page for mode_sense */
1867         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1868                                      0, 0, 0, 0, 0, 0, 0, 0,
1869                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1870
1871         memcpy(p, format_pg, sizeof(format_pg));
1872         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1873         p[11] = sdebug_sectors_per & 0xff;
1874         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1875         p[13] = scsi_debug_sector_size & 0xff;
1876         if (scsi_debug_removable)
1877                 p[20] |= 0x20; /* should agree with INQUIRY */
1878         if (1 == pcontrol)
1879                 memset(p + 2, 0, sizeof(format_pg) - 2);
1880         return sizeof(format_pg);
1881 }
1882
1883 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1884 {       /* Caching page for mode_sense */
1885         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1886                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1887         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1888                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1889
1890         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1891                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1892         memcpy(p, caching_pg, sizeof(caching_pg));
1893         if (1 == pcontrol)
1894                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1895         else if (2 == pcontrol)
1896                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1897         return sizeof(caching_pg);
1898 }
1899
1900 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1901 {       /* Control mode page for mode_sense */
1902         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1903                                         0, 0, 0, 0};
1904         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1905                                      0, 0, 0x2, 0x4b};
1906
1907         if (scsi_debug_dsense)
1908                 ctrl_m_pg[2] |= 0x4;
1909         else
1910                 ctrl_m_pg[2] &= ~0x4;
1911
1912         if (scsi_debug_ato)
1913                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1914
1915         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1916         if (1 == pcontrol)
1917                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1918         else if (2 == pcontrol)
1919                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1920         return sizeof(ctrl_m_pg);
1921 }
1922
1923
1924 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1925 {       /* Informational Exceptions control mode page for mode_sense */
1926         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1927                                        0, 0, 0x0, 0x0};
1928         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1929                                       0, 0, 0x0, 0x0};
1930
1931         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1932         if (1 == pcontrol)
1933                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1934         else if (2 == pcontrol)
1935                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1936         return sizeof(iec_m_pg);
1937 }
1938
1939 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1940 {       /* SAS SSP mode page - short format for mode_sense */
1941         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1942                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1943
1944         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1945         if (1 == pcontrol)
1946                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1947         return sizeof(sas_sf_m_pg);
1948 }
1949
1950
1951 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1952                               int target_dev_id)
1953 {       /* SAS phy control and discover mode page for mode_sense */
1954         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1955                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1956                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1957                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1958                     0x2, 0, 0, 0, 0, 0, 0, 0,
1959                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1960                     0, 0, 0, 0, 0, 0, 0, 0,
1961                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1962                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1963                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1964                     0x3, 0, 0, 0, 0, 0, 0, 0,
1965                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1966                     0, 0, 0, 0, 0, 0, 0, 0,
1967                 };
1968         int port_a, port_b;
1969
1970         port_a = target_dev_id + 1;
1971         port_b = port_a + 1;
1972         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1973         p[20] = (port_a >> 24);
1974         p[21] = (port_a >> 16) & 0xff;
1975         p[22] = (port_a >> 8) & 0xff;
1976         p[23] = port_a & 0xff;
1977         p[48 + 20] = (port_b >> 24);
1978         p[48 + 21] = (port_b >> 16) & 0xff;
1979         p[48 + 22] = (port_b >> 8) & 0xff;
1980         p[48 + 23] = port_b & 0xff;
1981         if (1 == pcontrol)
1982                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1983         return sizeof(sas_pcd_m_pg);
1984 }
1985
1986 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1987 {       /* SAS SSP shared protocol specific port mode subpage */
1988         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1989                     0, 0, 0, 0, 0, 0, 0, 0,
1990                 };
1991
1992         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1993         if (1 == pcontrol)
1994                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1995         return sizeof(sas_sha_m_pg);
1996 }
1997
1998 #define SDEBUG_MAX_MSENSE_SZ 256
1999
2000 static int
2001 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2002 {
2003         unsigned char dbd, llbaa;
2004         int pcontrol, pcode, subpcode, bd_len;
2005         unsigned char dev_spec;
2006         int k, alloc_len, msense_6, offset, len, target_dev_id;
2007         int target = scp->device->id;
2008         unsigned char * ap;
2009         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2010         unsigned char *cmd = scp->cmnd;
2011
2012         dbd = !!(cmd[1] & 0x8);
2013         pcontrol = (cmd[2] & 0xc0) >> 6;
2014         pcode = cmd[2] & 0x3f;
2015         subpcode = cmd[3];
2016         msense_6 = (MODE_SENSE == cmd[0]);
2017         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
2018         if ((0 == scsi_debug_ptype) && (0 == dbd))
2019                 bd_len = llbaa ? 16 : 8;
2020         else
2021                 bd_len = 0;
2022         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
2023         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2024         if (0x3 == pcontrol) {  /* Saving values not supported */
2025                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2026                 return check_condition_result;
2027         }
2028         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2029                         (devip->target * 1000) - 3;
2030         /* set DPOFUA bit for disks */
2031         if (0 == scsi_debug_ptype)
2032                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
2033         else
2034                 dev_spec = 0x0;
2035         if (msense_6) {
2036                 arr[2] = dev_spec;
2037                 arr[3] = bd_len;
2038                 offset = 4;
2039         } else {
2040                 arr[3] = dev_spec;
2041                 if (16 == bd_len)
2042                         arr[4] = 0x1;   /* set LONGLBA bit */
2043                 arr[7] = bd_len;        /* assume 255 or less */
2044                 offset = 8;
2045         }
2046         ap = arr + offset;
2047         if ((bd_len > 0) && (!sdebug_capacity))
2048                 sdebug_capacity = get_sdebug_capacity();
2049
2050         if (8 == bd_len) {
2051                 if (sdebug_capacity > 0xfffffffe) {
2052                         ap[0] = 0xff;
2053                         ap[1] = 0xff;
2054                         ap[2] = 0xff;
2055                         ap[3] = 0xff;
2056                 } else {
2057                         ap[0] = (sdebug_capacity >> 24) & 0xff;
2058                         ap[1] = (sdebug_capacity >> 16) & 0xff;
2059                         ap[2] = (sdebug_capacity >> 8) & 0xff;
2060                         ap[3] = sdebug_capacity & 0xff;
2061                 }
2062                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2063                 ap[7] = scsi_debug_sector_size & 0xff;
2064                 offset += bd_len;
2065                 ap = arr + offset;
2066         } else if (16 == bd_len) {
2067                 unsigned long long capac = sdebug_capacity;
2068
2069                 for (k = 0; k < 8; ++k, capac >>= 8)
2070                         ap[7 - k] = capac & 0xff;
2071                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2072                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2073                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2074                 ap[15] = scsi_debug_sector_size & 0xff;
2075                 offset += bd_len;
2076                 ap = arr + offset;
2077         }
2078
2079         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2080                 /* TODO: Control Extension page */
2081                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2082                 return check_condition_result;
2083         }
2084         switch (pcode) {
2085         case 0x1:       /* Read-Write error recovery page, direct access */
2086                 len = resp_err_recov_pg(ap, pcontrol, target);
2087                 offset += len;
2088                 break;
2089         case 0x2:       /* Disconnect-Reconnect page, all devices */
2090                 len = resp_disconnect_pg(ap, pcontrol, target);
2091                 offset += len;
2092                 break;
2093         case 0x3:       /* Format device page, direct access */
2094                 len = resp_format_pg(ap, pcontrol, target);
2095                 offset += len;
2096                 break;
2097         case 0x8:       /* Caching page, direct access */
2098                 len = resp_caching_pg(ap, pcontrol, target);
2099                 offset += len;
2100                 break;
2101         case 0xa:       /* Control Mode page, all devices */
2102                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2103                 offset += len;
2104                 break;
2105         case 0x19:      /* if spc==1 then sas phy, control+discover */
2106                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2107                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2108                         return check_condition_result;
2109                 }
2110                 len = 0;
2111                 if ((0x0 == subpcode) || (0xff == subpcode))
2112                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2113                 if ((0x1 == subpcode) || (0xff == subpcode))
2114                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2115                                                   target_dev_id);
2116                 if ((0x2 == subpcode) || (0xff == subpcode))
2117                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2118                 offset += len;
2119                 break;
2120         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2121                 len = resp_iec_m_pg(ap, pcontrol, target);
2122                 offset += len;
2123                 break;
2124         case 0x3f:      /* Read all Mode pages */
2125                 if ((0 == subpcode) || (0xff == subpcode)) {
2126                         len = resp_err_recov_pg(ap, pcontrol, target);
2127                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2128                         len += resp_format_pg(ap + len, pcontrol, target);
2129                         len += resp_caching_pg(ap + len, pcontrol, target);
2130                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2131                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2132                         if (0xff == subpcode) {
2133                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2134                                                   target, target_dev_id);
2135                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2136                         }
2137                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2138                 } else {
2139                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2140                         return check_condition_result;
2141                 }
2142                 offset += len;
2143                 break;
2144         default:
2145                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2146                 return check_condition_result;
2147         }
2148         if (msense_6)
2149                 arr[0] = offset - 1;
2150         else {
2151                 arr[0] = ((offset - 2) >> 8) & 0xff;
2152                 arr[1] = (offset - 2) & 0xff;
2153         }
2154         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2155 }
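
/*
 * Editor's illustrative sketch (not part of the driver): the 8 byte mode
 * parameter header(10) that resp_mode_sense() fills in above for the
 * MODE SENSE(10) case; the struct name is made up for this example.
 */
struct sdeb_example_msense10_hdr {
        __be16 mode_data_length;        /* bytes following this field */
        u8 medium_type;                 /* 0 for direct access */
        u8 device_specific;             /* WP (bit 7) and DPOFUA (bit 4) */
        u8 longlba;                     /* bit 0 set for 16 byte descriptors */
        u8 reserved;
        __be16 block_desc_length;       /* 0, 8 or 16 here */
} __packed;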
2156
2157 #define SDEBUG_MAX_MSELECT_SZ 512
2158
2159 static int
2160 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2161 {
2162         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2163         int param_len, res, mpage;
2164         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2165         unsigned char *cmd = scp->cmnd;
2166         int mselect6 = (MODE_SELECT == cmd[0]);
2167
2168         memset(arr, 0, sizeof(arr));
2169         pf = cmd[1] & 0x10;
2170         sp = cmd[1] & 0x1;
2171         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2172         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2173                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2174                 return check_condition_result;
2175         }
2176         res = fetch_to_dev_buffer(scp, arr, param_len);
2177         if (-1 == res)
2178                 return (DID_ERROR << 16);
2179         else if ((res < param_len) &&
2180                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2181                 sdev_printk(KERN_INFO, scp->device,
2182                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2183                             __func__, param_len, res);
2184         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2185         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2186         if (md_len > 2) {
2187                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2188                 return check_condition_result;
2189         }
2190         off = bd_len + (mselect6 ? 4 : 8);
2191         mpage = arr[off] & 0x3f;
2192         ps = !!(arr[off] & 0x80);
2193         if (ps) {
2194                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2195                 return check_condition_result;
2196         }
2197         spf = !!(arr[off] & 0x40);
2198         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2199                        (arr[off + 1] + 2);
2200         if ((pg_len + off) > param_len) {
2201                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2202                                 PARAMETER_LIST_LENGTH_ERR, 0);
2203                 return check_condition_result;
2204         }
2205         switch (mpage) {
2206         case 0x8:      /* Caching Mode page */
2207                 if (caching_pg[1] == arr[off + 1]) {
2208                         memcpy(caching_pg + 2, arr + off + 2,
2209                                sizeof(caching_pg) - 2);
2210                         goto set_mode_changed_ua;
2211                 }
2212                 break;
2213         case 0xa:      /* Control Mode page */
2214                 if (ctrl_m_pg[1] == arr[off + 1]) {
2215                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2216                                sizeof(ctrl_m_pg) - 2);
2217                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2218                         goto set_mode_changed_ua;
2219                 }
2220                 break;
2221         case 0x1c:      /* Informational Exceptions Mode page */
2222                 if (iec_m_pg[1] == arr[off + 1]) {
2223                         memcpy(iec_m_pg + 2, arr + off + 2,
2224                                sizeof(iec_m_pg) - 2);
2225                         goto set_mode_changed_ua;
2226                 }
2227                 break;
2228         default:
2229                 break;
2230         }
2231         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2232         return check_condition_result;
2233 set_mode_changed_ua:
2234         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2235         return 0;
2236 }
2237
2238 static int resp_temp_l_pg(unsigned char * arr)
2239 {
2240         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2241                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2242                 };
2243
2244         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2245         return sizeof(temp_l_pg);
2246 }
2247
2248 static int resp_ie_l_pg(unsigned char * arr)
2249 {
2250         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2251                 };
2252
2253         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2254         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2255                 arr[4] = THRESHOLD_EXCEEDED;
2256                 arr[5] = 0xff;
2257         }
2258         return sizeof(ie_l_pg);
2259 }
2260
2261 #define SDEBUG_MAX_LSENSE_SZ 512
2262
2263 static int resp_log_sense(struct scsi_cmnd * scp,
2264                           struct sdebug_dev_info * devip)
2265 {
2266         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2267         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2268         unsigned char *cmd = scp->cmnd;
2269
2270         memset(arr, 0, sizeof(arr));
2271         ppc = cmd[1] & 0x2;
2272         sp = cmd[1] & 0x1;
2273         if (ppc || sp) {
2274                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2275                 return check_condition_result;
2276         }
2277         pcontrol = (cmd[2] & 0xc0) >> 6;
2278         pcode = cmd[2] & 0x3f;
2279         subpcode = cmd[3] & 0xff;
2280         alloc_len = (cmd[7] << 8) + cmd[8];
2281         arr[0] = pcode;
2282         if (0 == subpcode) {
2283                 switch (pcode) {
2284                 case 0x0:       /* Supported log pages log page */
2285                         n = 4;
2286                         arr[n++] = 0x0;         /* this page */
2287                         arr[n++] = 0xd;         /* Temperature */
2288                         arr[n++] = 0x2f;        /* Informational exceptions */
2289                         arr[3] = n - 4;
2290                         break;
2291                 case 0xd:       /* Temperature log page */
2292                         arr[3] = resp_temp_l_pg(arr + 4);
2293                         break;
2294                 case 0x2f:      /* Informational exceptions log page */
2295                         arr[3] = resp_ie_l_pg(arr + 4);
2296                         break;
2297                 default:
2298                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2299                         return check_condition_result;
2300                 }
2301         } else if (0xff == subpcode) {
2302                 arr[0] |= 0x40;
2303                 arr[1] = subpcode;
2304                 switch (pcode) {
2305                 case 0x0:       /* Supported log pages and subpages log page */
2306                         n = 4;
2307                         arr[n++] = 0x0;
2308                         arr[n++] = 0x0;         /* 0,0 page */
2309                         arr[n++] = 0x0;
2310                         arr[n++] = 0xff;        /* this page */
2311                         arr[n++] = 0xd;
2312                         arr[n++] = 0x0;         /* Temperature */
2313                         arr[n++] = 0x2f;
2314                         arr[n++] = 0x0; /* Informational exceptions */
2315                         arr[3] = n - 4;
2316                         break;
2317                 case 0xd:       /* Temperature subpages */
2318                         n = 4;
2319                         arr[n++] = 0xd;
2320                         arr[n++] = 0x0;         /* Temperature */
2321                         arr[3] = n - 4;
2322                         break;
2323                 case 0x2f:      /* Informational exceptions subpages */
2324                         n = 4;
2325                         arr[n++] = 0x2f;
2326                         arr[n++] = 0x0;         /* Informational exceptions */
2327                         arr[3] = n - 4;
2328                         break;
2329                 default:
2330                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2331                         return check_condition_result;
2332                 }
2333         } else {
2334                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2335                 return check_condition_result;
2336         }
2337         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2338         return fill_from_dev_buffer(scp, arr,
2339                     min(len, SDEBUG_MAX_LSENSE_SZ));
2340 }
2341
2342 static int check_device_access_params(struct scsi_cmnd *scp,
2343                                       unsigned long long lba, unsigned int num)
2344 {
2345         if (lba + num > sdebug_capacity) {
2346                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2347                 return check_condition_result;
2348         }
2349         /* transfer length excessive (tie in to block limits VPD page) */
2350         if (num > sdebug_store_sectors) {
2351                 /* needs work to find which cdb byte 'num' comes from */
2352                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2353                 return check_condition_result;
2354         }
2355         return 0;
2356 }
2357
2358 /* Returns number of bytes copied or -1 if error. */
2359 static int
2360 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2361 {
2362         int ret;
2363         u64 block, rest = 0;
2364         struct scsi_data_buffer *sdb;
2365         enum dma_data_direction dir;
2366
2367         if (do_write) {
2368                 sdb = scsi_out(scmd);
2369                 dir = DMA_TO_DEVICE;
2370         } else {
2371                 sdb = scsi_in(scmd);
2372                 dir = DMA_FROM_DEVICE;
2373         }
2374
2375         if (!sdb->length)
2376                 return 0;
2377         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2378                 return -1;
2379
2380         block = do_div(lba, sdebug_store_sectors);
2381         if (block + num > sdebug_store_sectors)
2382                 rest = block + num - sdebug_store_sectors;
2383
2384         ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2385                    fake_storep + (block * scsi_debug_sector_size),
2386                    (num - rest) * scsi_debug_sector_size, 0, do_write);
2387         if (ret != (num - rest) * scsi_debug_sector_size)
2388                 return ret;
2389
2390         if (rest) {
2391                 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2392                             fake_storep, rest * scsi_debug_sector_size,
2393                             (num - rest) * scsi_debug_sector_size, do_write);
2394         }
2395
2396         return ret;
2397 }
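
/*
 * Editor's note (worked example, not driver code): the split above wraps
 * the fake store like a ring. With sdebug_store_sectors == 1000, a request
 * for lba=2998, num=5 gives block = 2998 % 1000 = 998 and
 * rest = 998 + 5 - 1000 = 3, so two sectors are copied from the end of
 * fake_storep and the remaining three from its start.
 */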
2398
2399 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2400  * arr into fake_store(lba,num) and return true. If comparison fails then
2401  * return false. */
2402 static bool
2403 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2404 {
2405         bool res;
2406         u64 block, rest = 0;
2407         u32 store_blks = sdebug_store_sectors;
2408         u32 lb_size = scsi_debug_sector_size;
2409
2410         block = do_div(lba, store_blks);
2411         if (block + num > store_blks)
2412                 rest = block + num - store_blks;
2413
2414         res = !memcmp(fake_storep + (block * lb_size), arr,
2415                       (num - rest) * lb_size);
2416         if (!res)
2417                 return res;
2418         if (rest)
2419                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2420                               rest * lb_size);
2421         if (!res)
2422                 return res;
2423         arr += num * lb_size;
2424         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2425         if (rest)
2426                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2427                        rest * lb_size);
2428         return res;
2429 }
2430
2431 static __be16 dif_compute_csum(const void *buf, int len)
2432 {
2433         __be16 csum;
2434
2435         if (scsi_debug_guard)
2436                 csum = (__force __be16)ip_compute_csum(buf, len);
2437         else
2438                 csum = cpu_to_be16(crc_t10dif(buf, len));
2439
2440         return csum;
2441 }
2442
2443 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2444                       sector_t sector, u32 ei_lba)
2445 {
2446         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2447
2448         if (sdt->guard_tag != csum) {
2449                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2450                         __func__,
2451                         (unsigned long)sector,
2452                         be16_to_cpu(sdt->guard_tag),
2453                         be16_to_cpu(csum));
2454                 return 0x01;
2455         }
2456         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2457             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2458                 pr_err("%s: REF check failed on sector %lu\n",
2459                         __func__, (unsigned long)sector);
2460                 return 0x03;
2461         }
2462         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2463             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2464                 pr_err("%s: REF check failed on sector %lu\n",
2465                         __func__, (unsigned long)sector);
2466                 return 0x03;
2467         }
2468         return 0;
2469 }
2470
2471 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2472                           unsigned int sectors, bool read)
2473 {
2474         size_t resid;
2475         void *paddr;
2476         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2477         struct sg_mapping_iter miter;
2478
2479         /* Bytes of protection data to copy into sgl */
2480         resid = sectors * sizeof(*dif_storep);
2481
2482         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2483                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2484                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2485
2486         while (sg_miter_next(&miter) && resid > 0) {
2487                 size_t len = min(miter.length, resid);
2488                 void *start = dif_store(sector);
2489                 size_t rest = 0;
2490
2491                 if (dif_store_end < start + len)
2492                         rest = start + len - dif_store_end;
2493
2494                 paddr = miter.addr;
2495
2496                 if (read)
2497                         memcpy(paddr, start, len - rest);
2498                 else
2499                         memcpy(start, paddr, len - rest);
2500
2501                 if (rest) {
2502                         if (read)
2503                                 memcpy(paddr + len - rest, dif_storep, rest);
2504                         else
2505                                 memcpy(dif_storep, paddr + len - rest, rest);
2506                 }
2507
2508                 sector += len / sizeof(*dif_storep);
2509                 resid -= len;
2510         }
2511         sg_miter_stop(&miter);
2512 }
2513
2514 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2515                             unsigned int sectors, u32 ei_lba)
2516 {
2517         unsigned int i;
2518         struct sd_dif_tuple *sdt;
2519         sector_t sector;
2520
2521         for (i = 0; i < sectors; i++, ei_lba++) {
2522                 int ret;
2523
2524                 sector = start_sec + i;
2525                 sdt = dif_store(sector);
2526
2527                 if (sdt->app_tag == cpu_to_be16(0xffff))
2528                         continue;
2529
2530                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2531                 if (ret) {
2532                         dif_errors++;
2533                         return ret;
2534                 }
2535         }
2536
2537         dif_copy_prot(SCpnt, start_sec, sectors, true);
2538         dix_reads++;
2539
2540         return 0;
2541 }
2542
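/* Respond to READ(6), READ(10), READ(12), READ(16), READ(32) and the read
 * phase of XDWRITEREAD(10), including DIF/DIX checking, the optional fake
 * medium error range and injected errors. */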
2543 static int
2544 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2545 {
2546         u8 *cmd = scp->cmnd;
2547         u64 lba;
2548         u32 num;
2549         u32 ei_lba;
2550         unsigned long iflags;
2551         int ret;
2552         bool check_prot;
2553
2554         switch (cmd[0]) {
2555         case READ_16:
2556                 ei_lba = 0;
2557                 lba = get_unaligned_be64(cmd + 2);
2558                 num = get_unaligned_be32(cmd + 10);
2559                 check_prot = true;
2560                 break;
2561         case READ_10:
2562                 ei_lba = 0;
2563                 lba = get_unaligned_be32(cmd + 2);
2564                 num = get_unaligned_be16(cmd + 7);
2565                 check_prot = true;
2566                 break;
2567         case READ_6:
2568                 ei_lba = 0;
2569                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2570                       (u32)(cmd[1] & 0x1f) << 16;
2571                 num = (0 == cmd[4]) ? 256 : cmd[4];
2572                 check_prot = true;
2573                 break;
2574         case READ_12:
2575                 ei_lba = 0;
2576                 lba = get_unaligned_be32(cmd + 2);
2577                 num = get_unaligned_be32(cmd + 6);
2578                 check_prot = true;
2579                 break;
2580         case XDWRITEREAD_10:
2581                 ei_lba = 0;
2582                 lba = get_unaligned_be32(cmd + 2);
2583                 num = get_unaligned_be16(cmd + 7);
2584                 check_prot = false;
2585                 break;
2586         default:        /* assume READ(32) */
2587                 lba = get_unaligned_be64(cmd + 12);
2588                 ei_lba = get_unaligned_be32(cmd + 20);
2589                 num = get_unaligned_be32(cmd + 28);
2590                 check_prot = false;
2591                 break;
2592         }
2593         if (check_prot) {
2594                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2595                     (cmd[1] & 0xe0)) {
2596                         mk_sense_invalid_opcode(scp);
2597                         return check_condition_result;
2598                 }
2599                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2600                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2601                     (cmd[1] & 0xe0) == 0)
2602                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2603                                     "to DIF device\n");
2604         }
2605         if (sdebug_any_injecting_opt) {
2606                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2607
2608                 if (ep->inj_short)
2609                         num /= 2;
2610         }
2611
2612         /* inline check_device_access_params() */
2613         if (lba + num > sdebug_capacity) {
2614                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2615                 return check_condition_result;
2616         }
2617         /* transfer length excessive (tie in to block limits VPD page) */
2618         if (num > sdebug_store_sectors) {
2619                 /* needs work to find which cdb byte 'num' comes from */
2620                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2621                 return check_condition_result;
2622         }
2623
2624         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2625             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2626             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2627                 /* claim unrecoverable read error */
2628                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2629                 /* set info field and valid bit for fixed format sense */
2630                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2631                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2632                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2633                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2634                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2635                 }
2636                 scsi_set_resid(scp, scsi_bufflen(scp));
2637                 return check_condition_result;
2638         }
2639
2640         read_lock_irqsave(&atomic_rw, iflags);
2641
2642         /* DIX + T10 DIF */
2643         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2644                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2645
2646                 if (prot_ret) {
2647                         read_unlock_irqrestore(&atomic_rw, iflags);
2648                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2649                         return illegal_condition_result;
2650                 }
2651         }
2652
2653         ret = do_device_access(scp, lba, num, false);
2654         read_unlock_irqrestore(&atomic_rw, iflags);
2655         if (ret == -1)
2656                 return DID_ERROR << 16;
2657
2658         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2659
2660         if (sdebug_any_injecting_opt) {
2661                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2662
2663                 if (ep->inj_recovered) {
2664                         mk_sense_buffer(scp, RECOVERED_ERROR,
2665                                         THRESHOLD_EXCEEDED, 0);
2666                         return check_condition_result;
2667                 } else if (ep->inj_transport) {
2668                         mk_sense_buffer(scp, ABORTED_COMMAND,
2669                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2670                         return check_condition_result;
2671                 } else if (ep->inj_dif) {
2672                         /* Logical block guard check failed */
2673                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2674                         return illegal_condition_result;
2675                 } else if (ep->inj_dix) {
2676                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2677                         return illegal_condition_result;
2678                 }
2679         }
2680         return 0;
2681 }
2682
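/* Hex/ASCII dump of one sector, used when a protection check fails on a
 * write. */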
2683 static void dump_sector(unsigned char *buf, int len)
2684 {
2685         int i, j, n;
2686
2687         pr_err(">>> Sector Dump <<<\n");
2688         for (i = 0 ; i < len ; i += 16) {
2689                 char b[128];
2690
2691                 for (j = 0, n = 0; j < 16 && i + j < len; j++) {
2692                         unsigned char c = buf[i+j];
2693
2694                         if (c >= 0x20 && c < 0x7e)
2695                                 n += scnprintf(b + n, sizeof(b) - n,
2696                                                " %c ", c);
2697                         else
2698                                 n += scnprintf(b + n, sizeof(b) - n,
2699                                                "%02x ", c);
2700                 }
2701                 pr_err("%04d: %s\n", i, b);
2702         }
2703 }
2704
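/* Verify the protection tuples received with a write against the data being
 * written; on success, store them in dif_storep. */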
2705 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2706                              unsigned int sectors, u32 ei_lba)
2707 {
2708         int ret;
2709         struct sd_dif_tuple *sdt;
2710         void *daddr;
2711         sector_t sector = start_sec;
2712         int ppage_offset;
2713         int dpage_offset;
2714         struct sg_mapping_iter diter;
2715         struct sg_mapping_iter piter;
2716
2717         BUG_ON(scsi_sg_count(SCpnt) == 0);
2718         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2719
2720         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2721                         scsi_prot_sg_count(SCpnt),
2722                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2723         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2724                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2725
2726         /* For each protection page */
2727         while (sg_miter_next(&piter)) {
2728                 dpage_offset = 0;
2729                 if (WARN_ON(!sg_miter_next(&diter))) {
2730                         ret = 0x01;
2731                         goto out;
2732                 }
2733
2734                 for (ppage_offset = 0; ppage_offset < piter.length;
2735                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2736                         /* If we're at the end of the current
2737                          * data page, advance to the next one
2738                          */
2739                         if (dpage_offset >= diter.length) {
2740                                 if (WARN_ON(!sg_miter_next(&diter))) {
2741                                         ret = 0x01;
2742                                         goto out;
2743                                 }
2744                                 dpage_offset = 0;
2745                         }
2746
2747                         sdt = piter.addr + ppage_offset;
2748                         daddr = diter.addr + dpage_offset;
2749
2750                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2751                         if (ret) {
2752                                 dump_sector(daddr, scsi_debug_sector_size);
2753                                 goto out;
2754                         }
2755
2756                         sector++;
2757                         ei_lba++;
2758                         dpage_offset += scsi_debug_sector_size;
2759                 }
2760                 diter.consumed = dpage_offset;
2761                 sg_miter_stop(&diter);
2762         }
2763         sg_miter_stop(&piter);
2764
2765         dif_copy_prot(SCpnt, start_sec, sectors, false);
2766         dix_writes++;
2767
2768         return 0;
2769
2770 out:
2771         dif_errors++;
2772         sg_miter_stop(&diter);
2773         sg_miter_stop(&piter);
2774         return ret;
2775 }
2776
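/* lba_to_map_index() and map_index_to_lba() convert between LBAs and indexes
 * into the provisioning bitmap, honouring the unmap granularity and
 * alignment settings. */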
2777 static unsigned long lba_to_map_index(sector_t lba)
2778 {
2779         if (scsi_debug_unmap_alignment) {
2780                 lba += scsi_debug_unmap_granularity -
2781                         scsi_debug_unmap_alignment;
2782         }
2783         do_div(lba, scsi_debug_unmap_granularity);
2784
2785         return lba;
2786 }
2787
2788 static sector_t map_index_to_lba(unsigned long index)
2789 {
2790         sector_t lba = index * scsi_debug_unmap_granularity;
2791
2792         if (scsi_debug_unmap_alignment) {
2793                 lba -= scsi_debug_unmap_granularity -
2794                         scsi_debug_unmap_alignment;
2795         }
2796
2797         return lba;
2798 }
2799
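/* Return whether the block at 'lba' is mapped; *num is set to the number of
 * consecutive blocks (capped at the store size) sharing that state. */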
2800 static unsigned int map_state(sector_t lba, unsigned int *num)
2801 {
2802         sector_t end;
2803         unsigned int mapped;
2804         unsigned long index;
2805         unsigned long next;
2806
2807         index = lba_to_map_index(lba);
2808         mapped = test_bit(index, map_storep);
2809
2810         if (mapped)
2811                 next = find_next_zero_bit(map_storep, map_size, index);
2812         else
2813                 next = find_next_bit(map_storep, map_size, index);
2814
2815         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2816         *num = end - lba;
2817
2818         return mapped;
2819 }
2820
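/* Mark the provisioning bitmap entries covering [lba, lba + len) as mapped. */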
2821 static void map_region(sector_t lba, unsigned int len)
2822 {
2823         sector_t end = lba + len;
2824
2825         while (lba < end) {
2826                 unsigned long index = lba_to_map_index(lba);
2827
2828                 if (index < map_size)
2829                         set_bit(index, map_storep);
2830
2831                 lba = map_index_to_lba(index + 1);
2832         }
2833 }
2834
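/* Unmap each granularity unit wholly contained in [lba, lba + len); the
 * backing store is zeroed when lbprz is set and protection tuples, if
 * present, are invalidated. */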
2835 static void unmap_region(sector_t lba, unsigned int len)
2836 {
2837         sector_t end = lba + len;
2838
2839         while (lba < end) {
2840                 unsigned long index = lba_to_map_index(lba);
2841
2842                 if (lba == map_index_to_lba(index) &&
2843                     lba + scsi_debug_unmap_granularity <= end &&
2844                     index < map_size) {
2845                         clear_bit(index, map_storep);
2846                         if (scsi_debug_lbprz) {
2847                                 memset(fake_storep +
2848                                        lba * scsi_debug_sector_size, 0,
2849                                        scsi_debug_sector_size *
2850                                        scsi_debug_unmap_granularity);
2851                         }
2852                         if (dif_storep) {
2853                                 memset(dif_storep + lba, 0xff,
2854                                        sizeof(*dif_storep) *
2855                                        scsi_debug_unmap_granularity);
2856                         }
2857                 }
2858                 lba = map_index_to_lba(index + 1);
2859         }
2860 }
2861
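/* Respond to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE(32) and the
 * write phase of XDWRITEREAD(10); the DIF/DIX handling mirrors
 * resp_read_dt0(). */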
2862 static int
2863 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2864 {
2865         u8 *cmd = scp->cmnd;
2866         u64 lba;
2867         u32 num;
2868         u32 ei_lba;
2869         unsigned long iflags;
2870         int ret;
2871         bool check_prot;
2872
2873         switch (cmd[0]) {
2874         case WRITE_16:
2875                 ei_lba = 0;
2876                 lba = get_unaligned_be64(cmd + 2);
2877                 num = get_unaligned_be32(cmd + 10);
2878                 check_prot = true;
2879                 break;
2880         case WRITE_10:
2881                 ei_lba = 0;
2882                 lba = get_unaligned_be32(cmd + 2);
2883                 num = get_unaligned_be16(cmd + 7);
2884                 check_prot = true;
2885                 break;
2886         case WRITE_6:
2887                 ei_lba = 0;
2888                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2889                       (u32)(cmd[1] & 0x1f) << 16;
2890                 num = (0 == cmd[4]) ? 256 : cmd[4];
2891                 check_prot = true;
2892                 break;
2893         case WRITE_12:
2894                 ei_lba = 0;
2895                 lba = get_unaligned_be32(cmd + 2);
2896                 num = get_unaligned_be32(cmd + 6);
2897                 check_prot = true;
2898                 break;
2899         case 0x53:      /* XDWRITEREAD(10) */
2900                 ei_lba = 0;
2901                 lba = get_unaligned_be32(cmd + 2);
2902                 num = get_unaligned_be16(cmd + 7);
2903                 check_prot = false;
2904                 break;
2905         default:        /* assume WRITE(32) */
2906                 lba = get_unaligned_be64(cmd + 12);
2907                 ei_lba = get_unaligned_be32(cmd + 20);
2908                 num = get_unaligned_be32(cmd + 28);
2909                 check_prot = false;
2910                 break;
2911         }
2912         if (check_prot) {
2913                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2914                     (cmd[1] & 0xe0)) {
2915                         mk_sense_invalid_opcode(scp);
2916                         return check_condition_result;
2917                 }
2918                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2919                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2920                     (cmd[1] & 0xe0) == 0)
2921                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2922                                     "to DIF device\n");
2923         }
2924
2925         /* inline check_device_access_params() */
2926         if (lba + num > sdebug_capacity) {
2927                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2928                 return check_condition_result;
2929         }
2930         /* transfer length excessive (tie in to block limits VPD page) */
2931         if (num > sdebug_store_sectors) {
2932                 /* needs work to find which cdb byte 'num' comes from */
2933                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2934                 return check_condition_result;
2935         }
2936
2937         write_lock_irqsave(&atomic_rw, iflags);
2938
2939         /* DIX + T10 DIF */
2940         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2941                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2942
2943                 if (prot_ret) {
2944                         write_unlock_irqrestore(&atomic_rw, iflags);
2945                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2946                         return illegal_condition_result;
2947                 }
2948         }
2949
2950         ret = do_device_access(scp, lba, num, true);
2951         if (scsi_debug_lbp())
2952                 map_region(lba, num);
2953         write_unlock_irqrestore(&atomic_rw, iflags);
2954         if (-1 == ret)
2955                 return (DID_ERROR << 16);
2956         else if ((ret < (num * scsi_debug_sector_size)) &&
2957                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2958                 sdev_printk(KERN_INFO, scp->device,
2959                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2960                             my_name, num * scsi_debug_sector_size, ret);
2961
2962         if (sdebug_any_injecting_opt) {
2963                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2964
2965                 if (ep->inj_recovered) {
2966                         mk_sense_buffer(scp, RECOVERED_ERROR,
2967                                         THRESHOLD_EXCEEDED, 0);
2968                         return check_condition_result;
2969                 } else if (ep->inj_dif) {
2970                         /* Logical block guard check failed */
2971                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2972                         return illegal_condition_result;
2973                 } else if (ep->inj_dix) {
2974                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2975                         return illegal_condition_result;
2976                 }
2977         }
2978         return 0;
2979 }
2980
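/* Common worker for WRITE SAME(10) and WRITE SAME(16): either unmap the
 * range, or write one logical block (zeroes when NDOB is set) and replicate
 * it across the remaining blocks. */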
2981 static int
2982 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2983                 bool unmap, bool ndob)
2984 {
2985         unsigned long iflags;
2986         unsigned long long i;
2987         int ret;
2988
2989         ret = check_device_access_params(scp, lba, num);
2990         if (ret)
2991                 return ret;
2992
2993         write_lock_irqsave(&atomic_rw, iflags);
2994
2995         if (unmap && scsi_debug_lbp()) {
2996                 unmap_region(lba, num);
2997                 goto out;
2998         }
2999
3000         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3001         if (ndob) {
3002                 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
3003                        scsi_debug_sector_size);
3004                 ret = 0;
3005         } else
3006                 ret = fetch_to_dev_buffer(scp, fake_storep +
3007                                                (lba * scsi_debug_sector_size),
3008                                           scsi_debug_sector_size);
3009
3010         if (-1 == ret) {
3011                 write_unlock_irqrestore(&atomic_rw, iflags);
3012                 return (DID_ERROR << 16);
3013         } else if ((ret < (num * scsi_debug_sector_size)) &&
3014                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3015                 sdev_printk(KERN_INFO, scp->device,
3016                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3017                             my_name, "write same",
3018                             num * scsi_debug_sector_size, ret);
3019
3020         /* Copy first sector to remaining blocks */
3021         for (i = 1 ; i < num ; i++)
3022                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
3023                        fake_storep + (lba * scsi_debug_sector_size),
3024                        scsi_debug_sector_size);
3025
3026         if (scsi_debug_lbp())
3027                 map_region(lba, num);
3028 out:
3029         write_unlock_irqrestore(&atomic_rw, iflags);
3030
3031         return 0;
3032 }
3033
3034 static int
3035 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3036 {
3037         u8 *cmd = scp->cmnd;
3038         u32 lba;
3039         u16 num;
3040         u32 ei_lba = 0;
3041         bool unmap = false;
3042
3043         if (cmd[1] & 0x8) {
3044                 if (scsi_debug_lbpws10 == 0) {
3045                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3046                         return check_condition_result;
3047                 } else
3048                         unmap = true;
3049         }
3050         lba = get_unaligned_be32(cmd + 2);
3051         num = get_unaligned_be16(cmd + 7);
3052         if (num > scsi_debug_write_same_length) {
3053                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3054                 return check_condition_result;
3055         }
3056         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3057 }
3058
3059 static int
3060 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3061 {
3062         u8 *cmd = scp->cmnd;
3063         u64 lba;
3064         u32 num;
3065         u32 ei_lba = 0;
3066         bool unmap = false;
3067         bool ndob = false;
3068
3069         if (cmd[1] & 0x8) {     /* UNMAP */
3070                 if (scsi_debug_lbpws == 0) {
3071                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3072                         return check_condition_result;
3073                 } else
3074                         unmap = true;
3075         }
3076         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3077                 ndob = true;
3078         lba = get_unaligned_be64(cmd + 2);
3079         num = get_unaligned_be32(cmd + 10);
3080         if (num > scsi_debug_write_same_length) {
3081                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3082                 return check_condition_result;
3083         }
3084         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3085 }
3086
3087 /* Note that the mode field is in the same position as the (lower) service
3088  * action field. For the REPORT SUPPORTED OPERATION CODES command, SPC-4
3089  * suggests reporting each mode of this command separately; future work. */
3090 static int
3091 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3092 {
3093         u8 *cmd = scp->cmnd;
3094         struct scsi_device *sdp = scp->device;
3095         struct sdebug_dev_info *dp;
3096         u8 mode;
3097
3098         mode = cmd[1] & 0x1f;
3099         switch (mode) {
3100         case 0x4:       /* download microcode (MC) and activate (ACT) */
3101                 /* set UAs on this device only */
3102                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3103                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3104                 break;
3105         case 0x5:       /* download MC, save and ACT */
3106                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3107                 break;
3108         case 0x6:       /* download MC with offsets and ACT */
3109                 /* set UAs on most devices (LUs) in this target */
3110                 list_for_each_entry(dp,
3111                                     &devip->sdbg_host->dev_info_list,
3112                                     dev_list)
3113                         if (dp->target == sdp->id) {
3114                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3115                                 if (devip != dp)
3116                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3117                                                 dp->uas_bm);
3118                         }
3119                 break;
3120         case 0x7:       /* download MC with offsets, save, and ACT */
3121                 /* set UA on all devices (LUs) in this target */
3122                 list_for_each_entry(dp,
3123                                     &devip->sdbg_host->dev_info_list,
3124                                     dev_list)
3125                         if (dp->target == sdp->id)
3126                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3127                                         dp->uas_bm);
3128                 break;
3129         default:
3130                 /* do nothing for this command for other mode values */
3131                 break;
3132         }
3133         return 0;
3134 }
3135
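/* Respond to COMPARE AND WRITE: fetch the compare and write halves of the
 * data transfer into a temporary buffer, compare against the store and only
 * write on a match, otherwise return MISCOMPARE. */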
3136 static int
3137 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3138 {
3139         u8 *cmd = scp->cmnd;
3140         u8 *arr;
3141         u8 *fake_storep_hold;
3142         u64 lba;
3143         u32 dnum;
3144         u32 lb_size = scsi_debug_sector_size;
3145         u8 num;
3146         unsigned long iflags;
3147         int ret;
3148         int retval = 0;
3149
3150         lba = get_unaligned_be64(cmd + 2);
3151         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3152         if (0 == num)
3153                 return 0;       /* degenerate case, not an error */
3154         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3155             (cmd[1] & 0xe0)) {
3156                 mk_sense_invalid_opcode(scp);
3157                 return check_condition_result;
3158         }
3159         if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3160              scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3161             (cmd[1] & 0xe0) == 0)
3162                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3163                             "to DIF device\n");
3164
3165         /* inline check_device_access_params() */
3166         if (lba + num > sdebug_capacity) {
3167                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3168                 return check_condition_result;
3169         }
3170         /* transfer length excessive (tie in to block limits VPD page) */
3171         if (num > sdebug_store_sectors) {
3172                 /* needs work to find which cdb byte 'num' comes from */
3173                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3174                 return check_condition_result;
3175         }
3176         dnum = 2 * num;
3177         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3178         if (NULL == arr) {
3179                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3180                                 INSUFF_RES_ASCQ);
3181                 return check_condition_result;
3182         }
3183
3184         write_lock_irqsave(&atomic_rw, iflags);
3185
3186         /* trick do_device_access() into fetching both the compare and write
3187          * buffers of the data transfer into arr. Safe (atomic): write_lock held. */
3188         fake_storep_hold = fake_storep;
3189         fake_storep = arr;
3190         ret = do_device_access(scp, 0, dnum, true);
3191         fake_storep = fake_storep_hold;
3192         if (ret == -1) {
3193                 retval = DID_ERROR << 16;
3194                 goto cleanup;
3195         } else if ((ret < (dnum * lb_size)) &&
3196                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3197                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3198                             "indicated=%u, IO sent=%d bytes\n", my_name,
3199                             dnum * lb_size, ret);
3200         if (!comp_write_worker(lba, num, arr)) {
3201                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3202                 retval = check_condition_result;
3203                 goto cleanup;
3204         }
3205         if (scsi_debug_lbp())
3206                 map_region(lba, num);
3207 cleanup:
3208         write_unlock_irqrestore(&atomic_rw, iflags);
3209         kfree(arr);
3210         return retval;
3211 }
3212
3213 struct unmap_block_desc {
3214         __be64  lba;
3215         __be32  blocks;
3216         __be32  __reserved;
3217 };
3218
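/* Respond to UNMAP: walk the block descriptors in the parameter list and
 * deallocate each described range. */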
3219 static int
3220 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3221 {
3222         unsigned char *buf;
3223         struct unmap_block_desc *desc;
3224         unsigned int i, payload_len, descriptors;
3225         int ret;
3226         unsigned long iflags;
3227
3228
3229         if (!scsi_debug_lbp())
3230                 return 0;       /* fib and say it's done */
3231         payload_len = get_unaligned_be16(scp->cmnd + 7);
3232         BUG_ON(scsi_bufflen(scp) != payload_len);
3233
3234         descriptors = (payload_len - 8) / 16;
3235         if (descriptors > scsi_debug_unmap_max_desc) {
3236                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3237                 return check_condition_result;
3238         }
3239
3240         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3241         if (!buf) {
3242                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3243                                 INSUFF_RES_ASCQ);
3244                 return check_condition_result;
3245         }
3246
3247         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3248
3249         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3250         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3251
3252         desc = (void *)&buf[8];
3253
3254         write_lock_irqsave(&atomic_rw, iflags);
3255
3256         for (i = 0 ; i < descriptors ; i++) {
3257                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3258                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3259
3260                 ret = check_device_access_params(scp, lba, num);
3261                 if (ret)
3262                         goto out;
3263
3264                 unmap_region(lba, num);
3265         }
3266
3267         ret = 0;
3268
3269 out:
3270         write_unlock_irqrestore(&atomic_rw, iflags);
3271         kfree(buf);
3272
3273         return ret;
3274 }
3275
3276 #define SDEBUG_GET_LBA_STATUS_LEN 32
3277
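/* Respond to GET LBA STATUS: report whether the starting LBA is mapped and
 * how many following blocks share that provisioning state. */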
3278 static int
3279 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3280 {
3281         u8 *cmd = scp->cmnd;
3282         u64 lba;
3283         u32 alloc_len, mapped, num;
3284         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3285         int ret;
3286
3287         lba = get_unaligned_be64(cmd + 2);
3288         alloc_len = get_unaligned_be32(cmd + 10);
3289
3290         if (alloc_len < 24)
3291                 return 0;
3292
3293         ret = check_device_access_params(scp, lba, 1);
3294         if (ret)
3295                 return ret;
3296
3297         if (scsi_debug_lbp())
3298                 mapped = map_state(lba, &num);
3299         else {
3300                 mapped = 1;
3301                 /* following just in case virtual_gb changed */
3302                 sdebug_capacity = get_sdebug_capacity();
3303                 if (sdebug_capacity - lba <= 0xffffffff)
3304                         num = sdebug_capacity - lba;
3305                 else
3306                         num = 0xffffffff;
3307         }
3308
3309         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3310         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3311         put_unaligned_be64(lba, arr + 8);       /* LBA */
3312         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3313         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3314
3315         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3316 }
3317
3318 #define SDEBUG_RLUN_ARR_SZ 256
3319
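/* Respond to REPORT LUNS. SELECT REPORT 0 lists the ordinary LUs, 1 lists
 * only the REPORT LUNS well known LU, 2 lists both. */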
3320 static int resp_report_luns(struct scsi_cmnd * scp,
3321                             struct sdebug_dev_info * devip)
3322 {
3323         unsigned int alloc_len;
3324         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3325         u64 lun;
3326         unsigned char *cmd = scp->cmnd;
3327         int select_report = (int)cmd[2];
3328         struct scsi_lun *one_lun;
3329         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3330         unsigned char * max_addr;
3331
3332         clear_luns_changed_on_target(devip);
3333         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3334         shortish = (alloc_len < 4);
3335         if (shortish || (select_report > 2)) {
3336                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3337                 return check_condition_result;
3338         }
3339         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3340         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3341         lun_cnt = scsi_debug_max_luns;
3342         if (1 == select_report)
3343                 lun_cnt = 0;
3344         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3345                 --lun_cnt;
3346         want_wlun = (select_report > 0) ? 1 : 0;
3347         num = lun_cnt + want_wlun;
3348         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3349         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3350         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3351                             sizeof(struct scsi_lun)), num);
3352         if (n < num) {
3353                 want_wlun = 0;
3354                 lun_cnt = n;
3355         }
3356         one_lun = (struct scsi_lun *) &arr[8];
3357         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3358         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3359              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3360              i++, lun++) {
3361                 upper = (lun >> 8) & 0x3f;
3362                 if (upper)
3363                         one_lun[i].scsi_lun[0] =
3364                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3365                 one_lun[i].scsi_lun[1] = lun & 0xff;
3366         }
3367         if (want_wlun) {
3368                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3369                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3370                 i++;
3371         }
3372         alloc_len = (unsigned char *)(one_lun + i) - arr;
3373         return fill_from_dev_buffer(scp, arr,
3374                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3375 }
3376
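/* XOR the fetched data-out bytes into the data-in buffer for
 * XDWRITEREAD(10); the read and write themselves are done by the caller. */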
3377 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3378                             unsigned int num, struct sdebug_dev_info *devip)
3379 {
3380         int j;
3381         unsigned char *kaddr, *buf;
3382         unsigned int offset;
3383         struct scsi_data_buffer *sdb = scsi_in(scp);
3384         struct sg_mapping_iter miter;
3385
3386         /* it would be better not to use a temporary buffer here */
3387         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3388         if (!buf) {
3389                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3390                                 INSUFF_RES_ASCQ);
3391                 return check_condition_result;
3392         }
3393
3394         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3395
3396         offset = 0;
3397         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3398                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3399
3400         while (sg_miter_next(&miter)) {
3401                 kaddr = miter.addr;
3402                 for (j = 0; j < miter.length; j++)
3403                         *(kaddr + j) ^= *(buf + offset + j);
3404
3405                 offset += miter.length;
3406         }
3407         sg_miter_stop(&miter);
3408         kfree(buf);
3409
3410         return 0;
3411 }
3412
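/* Respond to XDWRITEREAD(10): read the old data, write the new data unless
 * DISABLE WRITE is set, then XOR old and new in the data-in buffer. */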
3413 static int
3414 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3415 {
3416         u8 *cmd = scp->cmnd;
3417         u64 lba;
3418         u32 num;
3419         int errsts;
3420
3421         if (!scsi_bidi_cmnd(scp)) {
3422                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3423                                 INSUFF_RES_ASCQ);
3424                 return check_condition_result;
3425         }
3426         errsts = resp_read_dt0(scp, devip);
3427         if (errsts)
3428                 return errsts;
3429         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3430                 errsts = resp_write_dt0(scp, devip);
3431                 if (errsts)
3432                         return errsts;
3433         }
3434         lba = get_unaligned_be32(cmd + 2);
3435         num = get_unaligned_be16(cmd + 7);
3436         return resp_xdwriteread(scp, lba, num, devip);
3437 }
3438
3439 /* When a timer or tasklet goes off, this function is called. */
3440 static void sdebug_q_cmd_complete(unsigned long indx)
3441 {
3442         int qa_indx;
3443         int retiring = 0;
3444         unsigned long iflags;
3445         struct sdebug_queued_cmd *sqcp;
3446         struct scsi_cmnd *scp;
3447         struct sdebug_dev_info *devip;
3448
3449         atomic_inc(&sdebug_completions);
3450         qa_indx = indx;
3451         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3452                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3453                 return;
3454         }
3455         spin_lock_irqsave(&queued_arr_lock, iflags);
3456         sqcp = &queued_arr[qa_indx];
3457         scp = sqcp->a_cmnd;
3458         if (NULL == scp) {
3459                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3460                 pr_err("%s: scp is NULL\n", __func__);
3461                 return;
3462         }
3463         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3464         if (devip)
3465                 atomic_dec(&devip->num_in_q);
3466         else
3467                 pr_err("%s: devip=NULL\n", __func__);
3468         if (atomic_read(&retired_max_queue) > 0)
3469                 retiring = 1;
3470
3471         sqcp->a_cmnd = NULL;
3472         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3473                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3474                 pr_err("%s: Unexpected completion\n", __func__);
3475                 return;
3476         }
3477
3478         if (unlikely(retiring)) {       /* user has reduced max_queue */
3479                 int k, retval;
3480
3481                 retval = atomic_read(&retired_max_queue);
3482                 if (qa_indx >= retval) {
3483                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3484                         pr_err("%s: index %d too large\n", __func__, retval);
3485                         return;
3486                 }
3487                 k = find_last_bit(queued_in_use_bm, retval);
3488                 if ((k < scsi_debug_max_queue) || (k == retval))
3489                         atomic_set(&retired_max_queue, 0);
3490                 else
3491                         atomic_set(&retired_max_queue, k + 1);
3492         }
3493         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3494         scp->scsi_done(scp); /* callback to mid level */
3495 }
3496
3497 /* When the high-resolution timer goes off, this function is called. */
3498 static enum hrtimer_restart
3499 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3500 {
3501         int qa_indx;
3502         int retiring = 0;
3503         unsigned long iflags;
3504         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3505         struct sdebug_queued_cmd *sqcp;
3506         struct scsi_cmnd *scp;
3507         struct sdebug_dev_info *devip;
3508
3509         atomic_inc(&sdebug_completions);
3510         qa_indx = sd_hrtp->qa_indx;
3511         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3512                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3513                 goto the_end;
3514         }
3515         spin_lock_irqsave(&queued_arr_lock, iflags);
3516         sqcp = &queued_arr[qa_indx];
3517         scp = sqcp->a_cmnd;
3518         if (NULL == scp) {
3519                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3520                 pr_err("%s: scp is NULL\n", __func__);
3521                 goto the_end;
3522         }
3523         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3524         if (devip)
3525                 atomic_dec(&devip->num_in_q);
3526         else
3527                 pr_err("%s: devip=NULL\n", __func__);
3528         if (atomic_read(&retired_max_queue) > 0)
3529                 retiring = 1;
3530
3531         sqcp->a_cmnd = NULL;
3532         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3533                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3534                 pr_err("%s: Unexpected completion\n", __func__);
3535                 goto the_end;
3536         }
3537
3538         if (unlikely(retiring)) {       /* user has reduced max_queue */
3539                 int k, retval;
3540
3541                 retval = atomic_read(&retired_max_queue);
3542                 if (qa_indx >= retval) {
3543                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3544                         pr_err("%s: index %d too large\n", __func__, retval);
3545                         goto the_end;
3546                 }
3547                 k = find_last_bit(queued_in_use_bm, retval);
3548                 if ((k < scsi_debug_max_queue) || (k == retval))
3549                         atomic_set(&retired_max_queue, 0);
3550                 else
3551                         atomic_set(&retired_max_queue, k + 1);
3552         }
3553         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3554         scp->scsi_done(scp); /* callback to mid level */
3555 the_end:
3556         return HRTIMER_NORESTART;
3557 }
3558
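/* Allocate a per-LU sdebug_dev_info and add it to the host's device list. */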
3559 static struct sdebug_dev_info *
3560 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3561 {
3562         struct sdebug_dev_info *devip;
3563
3564         devip = kzalloc(sizeof(*devip), flags);
3565         if (devip) {
3566                 devip->sdbg_host = sdbg_host;
3567                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3568         }
3569         return devip;
3570 }
3571
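/* Return the sdebug_dev_info bound to sdev, reusing a free entry or
 * allocating a new one if necessary; newly registered devices get a
 * power-on unit attention. */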
3572 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3573 {
3574         struct sdebug_host_info * sdbg_host;
3575         struct sdebug_dev_info * open_devip = NULL;
3576         struct sdebug_dev_info * devip =
3577                         (struct sdebug_dev_info *)sdev->hostdata;
3578
3579         if (devip)
3580                 return devip;
3581         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3582         if (!sdbg_host) {
3583                 pr_err("%s: Host info NULL\n", __func__);
3584                 return NULL;
3585         }
3586         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3587                 if ((devip->used) && (devip->channel == sdev->channel) &&
3588                     (devip->target == sdev->id) &&
3589                     (devip->lun == sdev->lun))
3590                         return devip;
3591                 else {
3592                         if ((!devip->used) && (!open_devip))
3593                                 open_devip = devip;
3594                 }
3595         }
3596         if (!open_devip) { /* try and make a new one */
3597                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3598                 if (!open_devip) {
3599                         printk(KERN_ERR "%s: out of memory at line %d\n",
3600                                 __func__, __LINE__);
3601                         return NULL;
3602                 }
3603         }
3604
3605         open_devip->channel = sdev->channel;
3606         open_devip->target = sdev->id;
3607         open_devip->lun = sdev->lun;
3608         open_devip->sdbg_host = sdbg_host;
3609         atomic_set(&open_devip->num_in_q, 0);
3610         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3611         open_devip->used = true;
3612         return open_devip;
3613 }
3614
3615 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3616 {
3617         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3618                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3619                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3620         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3621         return 0;
3622 }
3623
3624 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3625 {
3626         struct sdebug_dev_info *devip;
3627
3628         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3629                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3630                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3631         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3632                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3633         devip = devInfoReg(sdp);
3634         if (NULL == devip)
3635                 return 1;       /* no resources, will be marked offline */
3636         sdp->hostdata = devip;
3637         blk_queue_max_segment_size(sdp->request_queue, -1U);
3638         if (scsi_debug_no_uld)
3639                 sdp->no_uld_attach = 1;
3640         return 0;
3641 }
3642
3643 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3644 {
3645         struct sdebug_dev_info *devip =
3646                 (struct sdebug_dev_info *)sdp->hostdata;
3647
3648         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3649                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3650                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3651         if (devip) {
3652                 /* make this slot available for re-use */
3653                 devip->used = false;
3654                 sdp->hostdata = NULL;
3655         }
3656 }
3657
3658 /* Returns 1 if cmnd found (cancels its timer, hrtimer or tasklet), else 0 */
3659 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3660 {
3661         unsigned long iflags;
3662         int k, qmax, r_qmax;
3663         struct sdebug_queued_cmd *sqcp;
3664         struct sdebug_dev_info *devip;
3665
3666         spin_lock_irqsave(&queued_arr_lock, iflags);
3667         qmax = scsi_debug_max_queue;
3668         r_qmax = atomic_read(&retired_max_queue);
3669         if (r_qmax > qmax)
3670                 qmax = r_qmax;
3671         for (k = 0; k < qmax; ++k) {
3672                 if (test_bit(k, queued_in_use_bm)) {
3673                         sqcp = &queued_arr[k];
3674                         if (cmnd == sqcp->a_cmnd) {
3675                                 devip = (struct sdebug_dev_info *)
3676                                         cmnd->device->hostdata;
3677                                 if (devip)
3678                                         atomic_dec(&devip->num_in_q);
3679                                 sqcp->a_cmnd = NULL;
3680                                 spin_unlock_irqrestore(&queued_arr_lock,
3681                                                        iflags);
3682                                 if (scsi_debug_ndelay > 0) {
3683                                         if (sqcp->sd_hrtp)
3684                                                 hrtimer_cancel(
3685                                                         &sqcp->sd_hrtp->hrt);
3686                                 } else if (scsi_debug_delay > 0) {
3687                                         if (sqcp->cmnd_timerp)
3688                                                 del_timer_sync(
3689                                                         sqcp->cmnd_timerp);
3690                                 } else if (scsi_debug_delay < 0) {
3691                                         if (sqcp->tletp)
3692                                                 tasklet_kill(sqcp->tletp);
3693                                 }
3694                                 clear_bit(k, queued_in_use_bm);
3695                                 return 1;
3696                         }
3697                 }
3698         }
3699         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3700         return 0;
3701 }
3702
3703 /* Deletes (stops) timers or tasklets of all queued commands */
3704 static void stop_all_queued(void)
3705 {
3706         unsigned long iflags;
3707         int k;
3708         struct sdebug_queued_cmd *sqcp;
3709         struct sdebug_dev_info *devip;
3710
3711         spin_lock_irqsave(&queued_arr_lock, iflags);
3712         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3713                 if (test_bit(k, queued_in_use_bm)) {
3714                         sqcp = &queued_arr[k];
3715                         if (sqcp->a_cmnd) {
3716                                 devip = (struct sdebug_dev_info *)
3717                                         sqcp->a_cmnd->device->hostdata;
3718                                 if (devip)
3719                                         atomic_dec(&devip->num_in_q);
3720                                 sqcp->a_cmnd = NULL;
3721                                 spin_unlock_irqrestore(&queued_arr_lock,
3722                                                        iflags);
3723                                 if (scsi_debug_ndelay > 0) {
3724                                         if (sqcp->sd_hrtp)
3725                                                 hrtimer_cancel(
3726                                                         &sqcp->sd_hrtp->hrt);
3727                                 } else if (scsi_debug_delay > 0) {
3728                                         if (sqcp->cmnd_timerp)
3729                                                 del_timer_sync(
3730                                                         sqcp->cmnd_timerp);
3731                                 } else if (scsi_debug_delay < 0) {
3732                                         if (sqcp->tletp)
3733                                                 tasklet_kill(sqcp->tletp);
3734                                 }
3735                                 clear_bit(k, queued_in_use_bm);
3736                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3737                         }
3738                 }
3739         }
3740         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3741 }
3742
3743 /* Free queued command memory on heap */
3744 static void free_all_queued(void)
3745 {
3746         unsigned long iflags;
3747         int k;
3748         struct sdebug_queued_cmd *sqcp;
3749
3750         spin_lock_irqsave(&queued_arr_lock, iflags);
3751         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3752                 sqcp = &queued_arr[k];
3753                 kfree(sqcp->cmnd_timerp);
3754                 sqcp->cmnd_timerp = NULL;
3755                 kfree(sqcp->tletp);
3756                 sqcp->tletp = NULL;
3757                 kfree(sqcp->sd_hrtp);
3758                 sqcp->sd_hrtp = NULL;
3759         }
3760         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3761 }
3762
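/* Error handler callback: count the abort and cancel the command's deferred
 * completion if it is still queued. */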
3763 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3764 {
3765         ++num_aborts;
3766         if (SCpnt) {
3767                 if (SCpnt->device &&
3768                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3769                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3770                                     __func__);
3771                 stop_queued_cmnd(SCpnt);
3772         }
3773         return SUCCESS;
3774 }
3775
3776 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3777 {
3778         struct sdebug_dev_info * devip;
3779
3780         ++num_dev_resets;
3781         if (SCpnt && SCpnt->device) {
3782                 struct scsi_device *sdp = SCpnt->device;
3783
3784                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3785                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3786                 devip = devInfoReg(sdp);
3787                 if (devip)
3788                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3789         }
3790         return SUCCESS;
3791 }
3792
3793 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3794 {
3795         struct sdebug_host_info *sdbg_host;
3796         struct sdebug_dev_info *devip;
3797         struct scsi_device *sdp;
3798         struct Scsi_Host *hp;
3799         int k = 0;
3800
3801         ++num_target_resets;
3802         if (!SCpnt)
3803                 goto lie;
3804         sdp = SCpnt->device;
3805         if (!sdp)
3806                 goto lie;
3807         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3808                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3809         hp = sdp->host;
3810         if (!hp)
3811                 goto lie;
3812         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3813         if (sdbg_host) {
3814                 list_for_each_entry(devip,
3815                                     &sdbg_host->dev_info_list,
3816                                     dev_list)
3817                         if (devip->target == sdp->id) {
3818                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3819                                 ++k;
3820                         }
3821         }
3822         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3823                 sdev_printk(KERN_INFO, sdp,
3824                             "%s: %d device(s) found in target\n", __func__, k);
3825 lie:
3826         return SUCCESS;
3827 }
3828
3829 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3830 {
3831         struct sdebug_host_info *sdbg_host;
3832         struct sdebug_dev_info *devip;
3833         struct scsi_device * sdp;
3834         struct Scsi_Host * hp;
3835         int k = 0;
3836
3837         ++num_bus_resets;
3838         if (!(SCpnt && SCpnt->device))
3839                 goto lie;
3840         sdp = SCpnt->device;
3841         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3842                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3843         hp = sdp->host;
3844         if (hp) {
3845                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3846                 if (sdbg_host) {
3847                         list_for_each_entry(devip,
3848                                             &sdbg_host->dev_info_list,
3849                                             dev_list) {
3850                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3851                                 ++k;
3852                         }
3853                 }
3854         }
3855         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3856                 sdev_printk(KERN_INFO, sdp,
3857                             "%s: %d device(s) found in host\n", __func__, k);
3858 lie:
3859         return SUCCESS;
3860 }
3861
3862 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3863 {
3864         struct sdebug_host_info * sdbg_host;
3865         struct sdebug_dev_info *devip;
3866         int k = 0;
3867
3868         ++num_host_resets;
3869         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3870                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3871         spin_lock(&sdebug_host_list_lock);
3872         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3873                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3874                                     dev_list) {
3875                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3876                         ++k;
3877                 }
3878         }
3879         spin_unlock(&sdebug_host_list_lock);
3880         stop_all_queued();
3881         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3882                 sdev_printk(KERN_INFO, SCpnt->device,
3883                             "%s: %d device(s) found\n", __func__, k);
3884         return SUCCESS;
3885 }
3886
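/* Write an MBR-style partition table into the first sector of the ram store,
 * dividing it into scsi_debug_num_parts cylinder-aligned partitions. */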
3887 static void __init sdebug_build_parts(unsigned char *ramp,
3888                                       unsigned long store_size)
3889 {
3890         struct partition * pp;
3891         int starts[SDEBUG_MAX_PARTS + 2];
3892         int sectors_per_part, num_sectors, k;
3893         int heads_by_sects, start_sec, end_sec;
3894
3895         /* assume partition table already zeroed */
3896         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3897                 return;
3898         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3899                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3900                 pr_warn("%s: reducing partitions to %d\n", __func__,
3901                         SDEBUG_MAX_PARTS);
3902         }
3903         num_sectors = (int)sdebug_store_sectors;
3904         sectors_per_part = (num_sectors - sdebug_sectors_per)
3905                            / scsi_debug_num_parts;
3906         heads_by_sects = sdebug_heads * sdebug_sectors_per;
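             /* round each partition start down to a cylinder boundary; the
              * first track is left for the partition table itself
              */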
3907         starts[0] = sdebug_sectors_per;
3908         for (k = 1; k < scsi_debug_num_parts; ++k)
3909                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3910                             * heads_by_sects;
3911         starts[scsi_debug_num_parts] = num_sectors;
3912         starts[scsi_debug_num_parts + 1] = 0;
3913
3914         ramp[510] = 0x55;       /* MBR boot signature: 0x55, 0xAA at offsets 510, 511 */
3915         ramp[511] = 0xAA;
3916         pp = (struct partition *)(ramp + 0x1be);
3917         for (k = 0; starts[k + 1]; ++k, ++pp) {
3918                 start_sec = starts[k];
3919                 end_sec = starts[k + 1] - 1;
3920                 pp->boot_ind = 0;
3921
3922                 pp->cyl = start_sec / heads_by_sects;
3923                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3924                            / sdebug_sectors_per;
3925                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3926
3927                 pp->end_cyl = end_sec / heads_by_sects;
3928                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3929                                / sdebug_sectors_per;
3930                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3931
3932                 pp->start_sect = cpu_to_le32(start_sec);
3933                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3934                 pp->sys_ind = 0x83;     /* plain Linux partition */
3935         }
3936 }
3937
3938 static int
3939 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3940               int scsi_result, int delta_jiff)
3941 {
3942         unsigned long iflags;
3943         int k, num_in_q, qdepth, inject;
3944         struct sdebug_queued_cmd *sqcp = NULL;
3945         struct scsi_device *sdp = cmnd ? cmnd->device : NULL;
3946
3947         if (NULL == cmnd || NULL == devip) {
3948                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3949                         __func__);
3950                 /* no particularly good error to report back */
3951                 return SCSI_MLQUEUE_HOST_BUSY;
3952         }
3953         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3954                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3955                             __func__, scsi_result);
3956         if (delta_jiff == 0)
3957                 goto respond_in_thread;
3958
3959         /* schedule the response at a later time if resources permit */
3960         spin_lock_irqsave(&queued_arr_lock, iflags);
3961         num_in_q = atomic_read(&devip->num_in_q);
3962         qdepth = cmnd->device->queue_depth;
3963         inject = 0;
3964         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3965                 if (scsi_result) {
3966                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3967                         goto respond_in_thread;
3968                 } else
3969                         scsi_result = device_qfull_result;
3970         } else if ((scsi_debug_every_nth != 0) &&
3971                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3972                    (scsi_result == 0)) {
3973                 if ((num_in_q == (qdepth - 1)) &&
3974                     (atomic_inc_return(&sdebug_a_tsf) >=
3975                      abs(scsi_debug_every_nth))) {
3976                         atomic_set(&sdebug_a_tsf, 0);
3977                         inject = 1;
3978                         scsi_result = device_qfull_result;
3979                 }
3980         }
3981
3982         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3983         if (k >= scsi_debug_max_queue) {
3984                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3985                 if (scsi_result)
3986                         goto respond_in_thread;
3987                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3988                         scsi_result = device_qfull_result;
3989                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3990                         sdev_printk(KERN_INFO, sdp,
3991                                     "%s: max_queue=%d exceeded, %s\n",
3992                                     __func__, scsi_debug_max_queue,
3993                                     (scsi_result ?  "status: TASK SET FULL" :
3994                                                     "report: host busy"));
3995                 if (scsi_result)
3996                         goto respond_in_thread;
3997                 else
3998                         return SCSI_MLQUEUE_HOST_BUSY;
3999         }
4000         __set_bit(k, queued_in_use_bm);
4001         atomic_inc(&devip->num_in_q);
4002         sqcp = &queued_arr[k];
4003         sqcp->a_cmnd = cmnd;
4004         cmnd->result = scsi_result;
4005         spin_unlock_irqrestore(&queued_arr_lock, iflags);
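             /* Three deferral paths follow: a jiffy-resolution timer when
              * delta_jiff > 0, a high-resolution timer when scsi_debug_ndelay
              * is set, and a tasklet for the "tiny" delay case (delta_jiff < 0)
              * where -1 requests high-priority scheduling.
              */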
4006         if (delta_jiff > 0) {
4007                 if (NULL == sqcp->cmnd_timerp) {
4008                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
4009                                                     GFP_ATOMIC);
4010                         if (NULL == sqcp->cmnd_timerp)
4011                                 return SCSI_MLQUEUE_HOST_BUSY;
4012                         init_timer(sqcp->cmnd_timerp);
4013                 }
4014                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
4015                 sqcp->cmnd_timerp->data = k;
4016                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
4017                 add_timer(sqcp->cmnd_timerp);
4018         } else if (scsi_debug_ndelay > 0) {
4019                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
4020                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
4021
4022                 if (NULL == sd_hp) {
4023                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
4024                         if (NULL == sd_hp)
4025                                 return SCSI_MLQUEUE_HOST_BUSY;
4026                         sqcp->sd_hrtp = sd_hp;
4027                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
4028                                      HRTIMER_MODE_REL);
4029                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
4030                         sd_hp->qa_indx = k;
4031                 }
4032                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
4033         } else {        /* delay < 0 */
4034                 if (NULL == sqcp->tletp) {
4035                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
4036                                               GFP_ATOMIC);
4037                         if (NULL == sqcp->tletp)
4038                                 return SCSI_MLQUEUE_HOST_BUSY;
4039                         tasklet_init(sqcp->tletp,
4040                                      sdebug_q_cmd_complete, k);
4041                 }
4042                 if (-1 == delta_jiff)
4043                         tasklet_hi_schedule(sqcp->tletp);
4044                 else
4045                         tasklet_schedule(sqcp->tletp);
4046         }
4047         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
4048             (scsi_result == device_qfull_result))
4049                 sdev_printk(KERN_INFO, sdp,
4050                             "%s: num_in_q=%d +1, %s%s\n", __func__,
4051                             num_in_q, (inject ? "<inject> " : ""),
4052                             "status: TASK SET FULL");
4053         return 0;
4054
4055 respond_in_thread:      /* call back to mid-layer using invocation thread */
4056         cmnd->result = scsi_result;
4057         cmnd->scsi_done(cmnd);
4058         return 0;
4059 }
4060
4061 /* Note: The following macros create attribute files in the
4062    /sys/module/scsi_debug/parameters directory. Unfortunately the driver
4063    is not notified when one of those files is written, so it cannot trigger
4064    the auxiliary actions that it performs when the corresponding attribute
4065    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4066  */
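     /* Illustrative usage (hypothetical values): parameters can be given at
      * load time, e.g. "modprobe scsi_debug dev_size_mb=256 num_tgts=2", or
      * the writable ones changed later via files such as
      * /sys/module/scsi_debug/parameters/opts .
      */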
4067 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
4068 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
4069 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
4070 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
4071 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
4072 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
4073 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
4074 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
4075 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
4076 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
4077 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
4078 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
4079 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
4080 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
4081 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
4082 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
4083 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
4084 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
4085 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
4086 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
4087 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
4088 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
4089 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
4090 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
4091 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
4092 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
4093 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
4094 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
4095 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
4096 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
4097 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
4098 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
4099 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
4100 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
4101 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4102 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4103 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4104 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4105                    S_IRUGO | S_IWUSR);
4106 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4107                    S_IRUGO | S_IWUSR);
4108
4109 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4110 MODULE_DESCRIPTION("SCSI debug adapter driver");
4111 MODULE_LICENSE("GPL");
4112 MODULE_VERSION(SCSI_DEBUG_VERSION);
4113
4114 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4115 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4116 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4117 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4118 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4119 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4120 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4121 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4122 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4123 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4124 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4125 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4126 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4127 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4128 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4129 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4130 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4131 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4132 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4133 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4134 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4135 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4136 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4137 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4138 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
4139 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4140 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4141 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4142 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4143 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4144 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4145 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4146 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4147 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4148 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4149 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4150 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4151 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4152 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4153
4154 static char sdebug_info[256];
4155
4156 static const char * scsi_debug_info(struct Scsi_Host * shp)
4157 {
4158         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4159                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4160                 scsi_debug_version_date, scsi_debug_dev_size_mb,
4161                 scsi_debug_opts);
4162         return sdebug_info;
4163 }
4164
4165 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4166 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4167 {
4168         char arr[16];
4169         int opts;
4170         int minLen = length > 15 ? 15 : length;
4171
4172         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4173                 return -EACCES;
4174         memcpy(arr, buffer, minLen);
4175         arr[minLen] = '\0';
4176         if (1 != sscanf(arr, "%d", &opts))
4177                 return -EINVAL;
4178         scsi_debug_opts = opts;
4179         if (scsi_debug_every_nth != 0)
4180                 atomic_set(&sdebug_cmnd_count, 0);
4181         return length;
4182 }
4183
4184 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4185  * same for each scsi_debug host (if more than one). Some of the counters
4186  * in the output are not atomic, so they may be inaccurate on a busy system. */
4187 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4188 {
4189         int f, l;
4190         char b[32];
4191
4192         if (scsi_debug_every_nth > 0)
4193                 snprintf(b, sizeof(b), " (curr:%d)",
4194                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4195                                 atomic_read(&sdebug_a_tsf) :
4196                                 atomic_read(&sdebug_cmnd_count)));
4197         else
4198                 b[0] = '\0';
4199
4200         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4201                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4202                 "every_nth=%d%s\n"
4203                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4204                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4205                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4206                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4207                 "usec_in_jiffy=%lu\n",
4208                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4209                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4210                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4211                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4212                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4213                 sdebug_sectors_per, num_aborts, num_dev_resets,
4214                 num_target_resets, num_bus_resets, num_host_resets,
4215                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4216
4217         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4218         if (f != scsi_debug_max_queue) {
4219                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4220                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4221                            "queued_in_use_bm", f, l);
4222         }
4223         return 0;
4224 }
4225
4226 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4227 {
4228         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4229 }
4230 /* Returns -EBUSY if delay is being changed and commands are queued */
4231 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4232                            size_t count)
4233 {
4234         int delay, res;
4235
4236         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4237                 res = count;
4238                 if (scsi_debug_delay != delay) {
4239                         unsigned long iflags;
4240                         int k;
4241
4242                         spin_lock_irqsave(&queued_arr_lock, iflags);
4243                         k = find_first_bit(queued_in_use_bm,
4244                                            scsi_debug_max_queue);
4245                         if (k != scsi_debug_max_queue)
4246                                 res = -EBUSY;   /* have queued commands */
4247                         else {
4248                                 scsi_debug_delay = delay;
4249                                 scsi_debug_ndelay = 0;
4250                         }
4251                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4252                 }
4253                 return res;
4254         }
4255         return -EINVAL;
4256 }
4257 static DRIVER_ATTR_RW(delay);
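     /* Illustrative usage: "echo 0 > /sys/bus/pseudo/drivers/scsi_debug/delay"
      * selects immediate (in-thread) completion; per delay_store() above the
      * write returns -EBUSY if the value would change while commands are
      * still queued.
      */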
4258
4259 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4260 {
4261         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4262 }
4263 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4264 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4265 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4266                            size_t count)
4267 {
4268         unsigned long iflags;
4269         int ndelay, res, k;
4270
4271         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4272             (ndelay >= 0) && (ndelay < 1000000000)) {
4273                 res = count;
4274                 if (scsi_debug_ndelay != ndelay) {
4275                         spin_lock_irqsave(&queued_arr_lock, iflags);
4276                         k = find_first_bit(queued_in_use_bm,
4277                                            scsi_debug_max_queue);
4278                         if (k != scsi_debug_max_queue)
4279                                 res = -EBUSY;   /* have queued commands */
4280                         else {
4281                                 scsi_debug_ndelay = ndelay;
4282                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4283                                                           : DEF_DELAY;
4284                         }
4285                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4286                 }
4287                 return res;
4288         }
4289         return -EINVAL;
4290 }
4291 static DRIVER_ATTR_RW(ndelay);
4292
4293 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4294 {
4295         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4296 }
4297
4298 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4299                           size_t count)
4300 {
4301         int opts;
4302         char work[20];
4303
4304         if (1 == sscanf(buf, "%10s", work)) {
4305                 if (0 == strncasecmp(work,"0x", 2)) {
4306                         if (1 == sscanf(&work[2], "%x", &opts))
4307                                 goto opts_done;
4308                 } else {
4309                         if (1 == sscanf(work, "%d", &opts))
4310                                 goto opts_done;
4311                 }
4312         }
4313         return -EINVAL;
4314 opts_done:
4315         scsi_debug_opts = opts;
4316         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4317                 sdebug_any_injecting_opt = true;
4318         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4319                 sdebug_any_injecting_opt = true;
4320         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4321                 sdebug_any_injecting_opt = true;
4322         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4323                 sdebug_any_injecting_opt = true;
4324         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4325                 sdebug_any_injecting_opt = true;
4326         atomic_set(&sdebug_cmnd_count, 0);
4327         atomic_set(&sdebug_a_tsf, 0);
4328         return count;
4329 }
4330 static DRIVER_ATTR_RW(opts);
4331
4332 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4333 {
4334         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4335 }
4336 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4337                            size_t count)
4338 {
4339         int n;
4340
4341         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4342                 scsi_debug_ptype = n;
4343                 return count;
4344         }
4345         return -EINVAL;
4346 }
4347 static DRIVER_ATTR_RW(ptype);
4348
4349 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4350 {
4351         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4352 }
4353 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4354                             size_t count)
4355 {
4356         int n;
4357
4358         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4359                 scsi_debug_dsense = n;
4360                 return count;
4361         }
4362         return -EINVAL;
4363 }
4364 static DRIVER_ATTR_RW(dsense);
4365
4366 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4367 {
4368         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4369 }
4370 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4371                              size_t count)
4372 {
4373         int n;
4374
4375         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4376                 n = (n > 0);
4377                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4378                 if (scsi_debug_fake_rw != n) {
4379                         if ((0 == n) && (NULL == fake_storep)) {
4380                                 unsigned long sz =
4381                                         (unsigned long)scsi_debug_dev_size_mb *
4382                                         1048576;
4383
4384                                 fake_storep = vmalloc(sz);
4385                                 if (NULL == fake_storep) {
4386                                         pr_err("%s: out of memory, 9\n",
4387                                                __func__);
4388                                         return -ENOMEM;
4389                                 }
4390                                 memset(fake_storep, 0, sz);
4391                         }
4392                         scsi_debug_fake_rw = n;
4393                 }
4394                 return count;
4395         }
4396         return -EINVAL;
4397 }
4398 static DRIVER_ATTR_RW(fake_rw);
4399
4400 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4401 {
4402         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4403 }
4404 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4405                               size_t count)
4406 {
4407         int n;
4408
4409         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4410                 scsi_debug_no_lun_0 = n;
4411                 return count;
4412         }
4413         return -EINVAL;
4414 }
4415 static DRIVER_ATTR_RW(no_lun_0);
4416
4417 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4418 {
4419         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4420 }
4421 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4422                               size_t count)
4423 {
4424         int n;
4425
4426         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4427                 scsi_debug_num_tgts = n;
4428                 sdebug_max_tgts_luns();
4429                 return count;
4430         }
4431         return -EINVAL;
4432 }
4433 static DRIVER_ATTR_RW(num_tgts);
4434
4435 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4436 {
4437         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4438 }
4439 static DRIVER_ATTR_RO(dev_size_mb);
4440
4441 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4442 {
4443         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4444 }
4445 static DRIVER_ATTR_RO(num_parts);
4446
4447 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4448 {
4449         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4450 }
4451 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4452                                size_t count)
4453 {
4454         int nth;
4455
4456         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4457                 scsi_debug_every_nth = nth;
4458                 atomic_set(&sdebug_cmnd_count, 0);
4459                 return count;
4460         }
4461         return -EINVAL;
4462 }
4463 static DRIVER_ATTR_RW(every_nth);
4464
4465 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4466 {
4467         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4468 }
4469 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4470                               size_t count)
4471 {
4472         int n;
4473         bool changed;
4474
4475         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4476                 changed = (scsi_debug_max_luns != n);
4477                 scsi_debug_max_luns = n;
4478                 sdebug_max_tgts_luns();
4479                 if (changed && (scsi_debug_scsi_level >= 5)) {  /* >= SPC-3 */
4480                         struct sdebug_host_info *sdhp;
4481                         struct sdebug_dev_info *dp;
4482
4483                         spin_lock(&sdebug_host_list_lock);
4484                         list_for_each_entry(sdhp, &sdebug_host_list,
4485                                             host_list) {
4486                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4487                                                     dev_list) {
4488                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
4489                                                 dp->uas_bm);
4490                                 }
4491                         }
4492                         spin_unlock(&sdebug_host_list_lock);
4493                 }
4494                 return count;
4495         }
4496         return -EINVAL;
4497 }
4498 static DRIVER_ATTR_RW(max_luns);
4499
4500 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4501 {
4502         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4503 }
4504 /* N.B. max_queue can be changed while there are queued commands. In flight
4505  * commands beyond the new max_queue will be completed. */
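     /* (max_queue_store() below also records the old high-water mark in
      *  retired_max_queue when in-use queued_arr slots lie above the new
      *  limit, so those in-flight commands can still complete.)
      */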
4506 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4507                                size_t count)
4508 {
4509         unsigned long iflags;
4510         int n, k;
4511
4512         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4513             (n <= SCSI_DEBUG_CANQUEUE)) {
4514                 spin_lock_irqsave(&queued_arr_lock, iflags);
4515                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4516                 scsi_debug_max_queue = n;
4517                 if (SCSI_DEBUG_CANQUEUE == k)
4518                         atomic_set(&retired_max_queue, 0);
4519                 else if (k >= n)
4520                         atomic_set(&retired_max_queue, k + 1);
4521                 else
4522                         atomic_set(&retired_max_queue, 0);
4523                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4524                 return count;
4525         }
4526         return -EINVAL;
4527 }
4528 static DRIVER_ATTR_RW(max_queue);
4529
4530 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4531 {
4532         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4533 }
4534 static DRIVER_ATTR_RO(no_uld);
4535
4536 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4537 {
4538         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4539 }
4540 static DRIVER_ATTR_RO(scsi_level);
4541
4542 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4543 {
4544         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4545 }
4546 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4547                                 size_t count)
4548 {
4549         int n;
4550         bool changed;
4551
4552         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4553                 changed = (scsi_debug_virtual_gb != n);
4554                 scsi_debug_virtual_gb = n;
4555                 sdebug_capacity = get_sdebug_capacity();
4556                 if (changed) {
4557                         struct sdebug_host_info *sdhp;
4558                         struct sdebug_dev_info *dp;
4559
4560                         spin_lock(&sdebug_host_list_lock);
4561                         list_for_each_entry(sdhp, &sdebug_host_list,
4562                                             host_list) {
4563                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4564                                                     dev_list) {
4565                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4566                                                 dp->uas_bm);
4567                                 }
4568                         }
4569                         spin_unlock(&sdebug_host_list_lock);
4570                 }
4571                 return count;
4572         }
4573         return -EINVAL;
4574 }
4575 static DRIVER_ATTR_RW(virtual_gb);
4576
4577 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4578 {
4579         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4580 }
4581
4582 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4583                               size_t count)
4584 {
4585         int delta_hosts;
4586
4587         if (sscanf(buf, "%d", &delta_hosts) != 1)
4588                 return -EINVAL;
4589         if (delta_hosts > 0) {
4590                 do {
4591                         sdebug_add_adapter();
4592                 } while (--delta_hosts);
4593         } else if (delta_hosts < 0) {
4594                 do {
4595                         sdebug_remove_adapter();
4596                 } while (++delta_hosts);
4597         }
4598         return count;
4599 }
4600 static DRIVER_ATTR_RW(add_host);
4601
4602 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4603 {
4604         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4605 }
4606 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4607                                     size_t count)
4608 {
4609         int n;
4610
4611         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4612                 scsi_debug_vpd_use_hostno = n;
4613                 return count;
4614         }
4615         return -EINVAL;
4616 }
4617 static DRIVER_ATTR_RW(vpd_use_hostno);
4618
4619 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4620 {
4621         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4622 }
4623 static DRIVER_ATTR_RO(sector_size);
4624
4625 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4626 {
4627         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4628 }
4629 static DRIVER_ATTR_RO(dix);
4630
4631 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4632 {
4633         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4634 }
4635 static DRIVER_ATTR_RO(dif);
4636
4637 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4638 {
4639         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4640 }
4641 static DRIVER_ATTR_RO(guard);
4642
4643 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4644 {
4645         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4646 }
4647 static DRIVER_ATTR_RO(ato);
4648
4649 static ssize_t map_show(struct device_driver *ddp, char *buf)
4650 {
4651         ssize_t count;
4652
4653         if (!scsi_debug_lbp())
4654                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4655                                  sdebug_store_sectors);
4656
4657         count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4658                           (int)map_size, map_storep);
4659         buf[count++] = '\n';
4660         buf[count] = '\0';
4661
4662         return count;
4663 }
4664 static DRIVER_ATTR_RO(map);
4665
4666 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4667 {
4668         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4669 }
4670 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4671                                size_t count)
4672 {
4673         int n;
4674
4675         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4676                 scsi_debug_removable = (n > 0);
4677                 return count;
4678         }
4679         return -EINVAL;
4680 }
4681 static DRIVER_ATTR_RW(removable);
4682
4683 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4684 {
4685         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4686 }
4687 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4688 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4689                                size_t count)
4690 {
4691         int n, res;
4692
4693         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4694                 bool new_host_lock = (n > 0);
4695
4696                 res = count;
4697                 if (new_host_lock != scsi_debug_host_lock) {
4698                         unsigned long iflags;
4699                         int k;
4700
4701                         spin_lock_irqsave(&queued_arr_lock, iflags);
4702                         k = find_first_bit(queued_in_use_bm,
4703                                            scsi_debug_max_queue);
4704                         if (k != scsi_debug_max_queue)
4705                                 res = -EBUSY;   /* have queued commands */
4706                         else
4707                                 scsi_debug_host_lock = new_host_lock;
4708                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4709                 }
4710                 return res;
4711         }
4712         return -EINVAL;
4713 }
4714 static DRIVER_ATTR_RW(host_lock);
4715
4716 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4717 {
4718         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4719 }
4720 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4721                             size_t count)
4722 {
4723         int n;
4724
4725         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4726                 scsi_debug_strict = (n > 0);
4727                 return count;
4728         }
4729         return -EINVAL;
4730 }
4731 static DRIVER_ATTR_RW(strict);
4732
4733
4734 /* Note: The following array creates attribute files in the
4735    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4736    files (over those found in the /sys/module/scsi_debug/parameters
4737    directory) is that auxiliary actions can be triggered when an attribute
4738    is changed. For example see: add_host_store() above.
4739  */
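     /* Illustrative usage: "echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host"
      * adds two more pseudo adapters at run time and a negative value removes
      * them, exercising add_host_store() above.
      */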
4740
4741 static struct attribute *sdebug_drv_attrs[] = {
4742         &driver_attr_delay.attr,
4743         &driver_attr_opts.attr,
4744         &driver_attr_ptype.attr,
4745         &driver_attr_dsense.attr,
4746         &driver_attr_fake_rw.attr,
4747         &driver_attr_no_lun_0.attr,
4748         &driver_attr_num_tgts.attr,
4749         &driver_attr_dev_size_mb.attr,
4750         &driver_attr_num_parts.attr,
4751         &driver_attr_every_nth.attr,
4752         &driver_attr_max_luns.attr,
4753         &driver_attr_max_queue.attr,
4754         &driver_attr_no_uld.attr,
4755         &driver_attr_scsi_level.attr,
4756         &driver_attr_virtual_gb.attr,
4757         &driver_attr_add_host.attr,
4758         &driver_attr_vpd_use_hostno.attr,
4759         &driver_attr_sector_size.attr,
4760         &driver_attr_dix.attr,
4761         &driver_attr_dif.attr,
4762         &driver_attr_guard.attr,
4763         &driver_attr_ato.attr,
4764         &driver_attr_map.attr,
4765         &driver_attr_removable.attr,
4766         &driver_attr_host_lock.attr,
4767         &driver_attr_ndelay.attr,
4768         &driver_attr_strict.attr,
4769         NULL,
4770 };
4771 ATTRIBUTE_GROUPS(sdebug_drv);
4772
4773 static struct device *pseudo_primary;
4774
4775 static int __init scsi_debug_init(void)
4776 {
4777         unsigned long sz;
4778         int host_to_add;
4779         int k;
4780         int ret;
4781
4782         atomic_set(&sdebug_cmnd_count, 0);
4783         atomic_set(&sdebug_completions, 0);
4784         atomic_set(&retired_max_queue, 0);
4785
4786         if (scsi_debug_ndelay >= 1000000000) {
4787                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4788                         __func__);
4789                 scsi_debug_ndelay = 0;
4790         } else if (scsi_debug_ndelay > 0)
4791                 scsi_debug_delay = DELAY_OVERRIDDEN;
4792
4793         switch (scsi_debug_sector_size) {
4794         case  512:
4795         case 1024:
4796         case 2048:
4797         case 4096:
4798                 break;
4799         default:
4800                 pr_err("%s: invalid sector_size %d\n", __func__,
4801                        scsi_debug_sector_size);
4802                 return -EINVAL;
4803         }
4804
4805         switch (scsi_debug_dif) {
4806
4807         case SD_DIF_TYPE0_PROTECTION:
4808         case SD_DIF_TYPE1_PROTECTION:
4809         case SD_DIF_TYPE2_PROTECTION:
4810         case SD_DIF_TYPE3_PROTECTION:
4811                 break;
4812
4813         default:
4814                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4815                 return -EINVAL;
4816         }
4817
4818         if (scsi_debug_guard > 1) {
4819                 pr_err("%s: guard must be 0 or 1\n", __func__);
4820                 return -EINVAL;
4821         }
4822
4823         if (scsi_debug_ato > 1) {
4824                 pr_err("%s: ato must be 0 or 1\n", __func__);
4825                 return -EINVAL;
4826         }
4827
4828         if (scsi_debug_physblk_exp > 15) {
4829                 pr_err("%s: invalid physblk_exp %u\n", __func__,
4830                        scsi_debug_physblk_exp);
4831                 return -EINVAL;
4832         }
4833
4834         if (scsi_debug_lowest_aligned > 0x3fff) {
4835                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4836                        scsi_debug_lowest_aligned);
4837                 return -EINVAL;
4838         }
4839
4840         if (scsi_debug_dev_size_mb < 1)
4841                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4842         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4843         sdebug_store_sectors = sz / scsi_debug_sector_size;
4844         sdebug_capacity = get_sdebug_capacity();
4845
4846         /* play around with geometry, don't waste too much on track 0 */
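             /* e.g. with the default 8 MiB store and 512 byte sectors this
              * works out to 8 heads, 32 sectors per track and 64 cylinders
              */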
4847         sdebug_heads = 8;
4848         sdebug_sectors_per = 32;
4849         if (scsi_debug_dev_size_mb >= 256)
4850                 sdebug_heads = 64;
4851         else if (scsi_debug_dev_size_mb >= 16)
4852                 sdebug_heads = 32;
4853         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4854                                (sdebug_sectors_per * sdebug_heads);
4855         if (sdebug_cylinders_per >= 1024) {
4856                 /* other LLDs do this; implies >= 1GB ram disk ... */
4857                 sdebug_heads = 255;
4858                 sdebug_sectors_per = 63;
4859                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4860                                (sdebug_sectors_per * sdebug_heads);
4861         }
4862
4863         if (0 == scsi_debug_fake_rw) {
4864                 fake_storep = vmalloc(sz);
4865                 if (NULL == fake_storep) {
4866                         pr_err("%s: out of memory, 1\n", __func__);
4867                         return -ENOMEM;
4868                 }
4869                 memset(fake_storep, 0, sz);
4870                 if (scsi_debug_num_parts > 0)
4871                         sdebug_build_parts(fake_storep, sz);
4872         }
4873
4874         if (scsi_debug_dix) {
4875                 int dif_size;
4876
4877                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4878                 dif_storep = vmalloc(dif_size);
4879
4880                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4881                         dif_storep);
4882
4883                 if (dif_storep == NULL) {
4884                         pr_err("%s: out of mem. (DIX)\n", __func__);
4885                         ret = -ENOMEM;
4886                         goto free_vm;
4887                 }
4888
4889                 memset(dif_storep, 0xff, dif_size);
4890         }
4891
4892         /* Logical Block Provisioning */
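             /* (thin provisioning, enabled via the lbpu, lbpws and/or lbpws10
              * module parameters described above)
              */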
4893         if (scsi_debug_lbp()) {
4894                 scsi_debug_unmap_max_blocks =
4895                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4896
4897                 scsi_debug_unmap_max_desc =
4898                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4899
4900                 scsi_debug_unmap_granularity =
4901                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4902
4903                 if (scsi_debug_unmap_alignment &&
4904                     scsi_debug_unmap_granularity <=
4905                     scsi_debug_unmap_alignment) {
4906                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
4907                                __func__);
4908                         return -EINVAL;
4909                 }
4910
4911                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4912                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4913
4914                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4915
4916                 if (map_storep == NULL) {
4917                         pr_err("%s: out of mem. (MAP)\n", __func__);
4918                         ret = -ENOMEM;
4919                         goto free_vm;
4920                 }
4921
4922                 bitmap_zero(map_storep, map_size);
4923
4924                 /* Map first 1KB for partition table */
4925                 if (scsi_debug_num_parts)
4926                         map_region(0, 2);
4927         }
4928
4929         pseudo_primary = root_device_register("pseudo_0");
4930         if (IS_ERR(pseudo_primary)) {
4931                 pr_warn("%s: root_device_register() error\n", __func__);
4932                 ret = PTR_ERR(pseudo_primary);
4933                 goto free_vm;
4934         }
4935         ret = bus_register(&pseudo_lld_bus);
4936         if (ret < 0) {
4937                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4938                 goto dev_unreg;
4939         }
4940         ret = driver_register(&sdebug_driverfs_driver);
4941         if (ret < 0) {
4942                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
4943                 goto bus_unreg;
4944         }
4945
4946         host_to_add = scsi_debug_add_host;
4947         scsi_debug_add_host = 0;
4948
4949         for (k = 0; k < host_to_add; k++) {
4950                 if (sdebug_add_adapter()) {
4951                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
4952                                 __func__, k);
4953                         break;
4954                 }
4955         }
4956
4957         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4958                 pr_info("%s: built %d host(s)\n", __func__,
4959                         scsi_debug_add_host);
4960         }
4961         return 0;
4962
4963 bus_unreg:
4964         bus_unregister(&pseudo_lld_bus);
4965 dev_unreg:
4966         root_device_unregister(pseudo_primary);
4967 free_vm:
4968         if (map_storep)
4969                 vfree(map_storep);
4970         if (dif_storep)
4971                 vfree(dif_storep);
4972         vfree(fake_storep);
4973
4974         return ret;
4975 }
4976
4977 static void __exit scsi_debug_exit(void)
4978 {
4979         int k = scsi_debug_add_host;
4980
4981         stop_all_queued();
4982         free_all_queued();
4983         for (; k; k--)
4984                 sdebug_remove_adapter();
4985         driver_unregister(&sdebug_driverfs_driver);
4986         bus_unregister(&pseudo_lld_bus);
4987         root_device_unregister(pseudo_primary);
4988
4989         if (dif_storep)
4990                 vfree(dif_storep);
4991
4992         vfree(fake_storep);
4993 }
4994
4995 device_initcall(scsi_debug_init);
4996 module_exit(scsi_debug_exit);
4997
4998 static void sdebug_release_adapter(struct device * dev)
4999 {
5000         struct sdebug_host_info *sdbg_host;
5001
5002         sdbg_host = to_sdebug_host(dev);
5003         kfree(sdbg_host);
5004 }
5005
5006 static int sdebug_add_adapter(void)
5007 {
5008         int k, devs_per_host;
5009         int error = 0;
5010         struct sdebug_host_info *sdbg_host;
5011         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5012
5013         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5014         if (NULL == sdbg_host) {
5015                 printk(KERN_ERR "%s: out of memory at line %d\n",
5016                        __func__, __LINE__);
5017                 return -ENOMEM;
5018         }
5019
5020         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5021
5022         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
5023         for (k = 0; k < devs_per_host; k++) {
5024                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5025                 if (!sdbg_devinfo) {
5026                         printk(KERN_ERR "%s: out of memory at line %d\n",
5027                                __func__, __LINE__);
5028                         error = -ENOMEM;
5029                         goto clean;
5030                 }
5031         }
5032
5033         spin_lock(&sdebug_host_list_lock);
5034         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5035         spin_unlock(&sdebug_host_list_lock);
5036
5037         sdbg_host->dev.bus = &pseudo_lld_bus;
5038         sdbg_host->dev.parent = pseudo_primary;
5039         sdbg_host->dev.release = &sdebug_release_adapter;
5040         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
5041
5042         error = device_register(&sdbg_host->dev);
5043
5044         if (error)
5045                 goto clean;
5046
5047         ++scsi_debug_add_host;
5048         return error;
5049
5050 clean:
5051         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5052                                  dev_list) {
5053                 list_del(&sdbg_devinfo->dev_list);
5054                 kfree(sdbg_devinfo);
5055         }
5056
5057         kfree(sdbg_host);
5058         return error;
5059 }
5060
5061 static void sdebug_remove_adapter(void)
5062 {
5063         struct sdebug_host_info * sdbg_host = NULL;
5064
5065         spin_lock(&sdebug_host_list_lock);
5066         if (!list_empty(&sdebug_host_list)) {
5067                 sdbg_host = list_entry(sdebug_host_list.prev,
5068                                        struct sdebug_host_info, host_list);
5069                 list_del(&sdbg_host->host_list);
5070         }
5071         spin_unlock(&sdebug_host_list_lock);
5072
5073         if (!sdbg_host)
5074                 return;
5075
5076         device_unregister(&sdbg_host->dev);
5077         --scsi_debug_add_host;
5078 }
5079
5080 static int
5081 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5082 {
5083         int num_in_q = 0;
5084         unsigned long iflags;
5085         struct sdebug_dev_info *devip;
5086
5087         spin_lock_irqsave(&queued_arr_lock, iflags);
5088         devip = (struct sdebug_dev_info *)sdev->hostdata;
5089         if (NULL == devip) {
5090                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
5091                 return  -ENODEV;
5092         }
5093         num_in_q = atomic_read(&devip->num_in_q);
5094         spin_unlock_irqrestore(&queued_arr_lock, iflags);
5095
5096         if (qdepth < 1)
5097                 qdepth = 1;
5098         /* allow qdepth to exceed the number of queued_arr elements, for testing */
5099         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
5100                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
5101         scsi_change_queue_depth(sdev, qdepth);
5102
5103         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
5104                 sdev_printk(KERN_INFO, sdev,
5105                             "%s: qdepth=%d, num_in_q=%d\n",
5106                             __func__, qdepth, num_in_q);
5107         }
5108         return sdev->queue_depth;
5109 }
5110
5111 static int
5112 check_inject(struct scsi_cmnd *scp)
5113 {
5114         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5115
5116         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5117
5118         if (atomic_inc_return(&sdebug_cmnd_count) >=
5119             abs(scsi_debug_every_nth)) {
5120                 atomic_set(&sdebug_cmnd_count, 0);
5121                 if (scsi_debug_every_nth < -1)
5122                         scsi_debug_every_nth = -1;
5123                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5124                         return 1; /* ignore command causing timeout */
5125                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5126                          scsi_medium_access_command(scp))
5127                         return 1; /* time out reads and writes */
5128                 if (sdebug_any_injecting_opt) {
5129                         int opts = scsi_debug_opts;
5130
5131                         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5132                                 ep->inj_recovered = true;
5133                         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5134                                 ep->inj_transport = true;
5135                         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5136                                 ep->inj_dif = true;
5137                         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5138                                 ep->inj_dix = true;
5139                         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5140                                 ep->inj_short = true;
5141                 }
5142         }
5143         return 0;
5144 }
5145
5146 static int
5147 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5148 {
5149         u8 sdeb_i;
5150         struct scsi_device *sdp = scp->device;
5151         const struct opcode_info_t *oip;
5152         const struct opcode_info_t *r_oip;
5153         struct sdebug_dev_info *devip;
5154         u8 *cmd = scp->cmnd;
5155         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5156         int k, na;
5157         int errsts = 0;
5158         int errsts_no_connect = DID_NO_CONNECT << 16;
5159         u32 flags;
5160         u16 sa;
5161         u8 opcode = cmd[0];
5162         bool has_wlun_rl;
5163         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5164
5165         scsi_set_resid(scp, 0);
5166         if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5167                 char b[120];
5168                 int n, len, sb;
5169
5170                 len = scp->cmd_len;
5171                 sb = (int)sizeof(b);
5172                 if (len > 32)
5173                         strcpy(b, "too long, over 32 bytes");
5174                 else {
5175                         for (k = 0, n = 0; k < len && n < sb; ++k)
5176                                 n += scnprintf(b + n, sb - n, "%02x ",
5177                                                (u32)cmd[k]);
5178                 }
5179                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5180         }
5181         has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5182         if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5183                 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5184
5185         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5186         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5187         devip = (struct sdebug_dev_info *)sdp->hostdata;
5188         if (!devip) {
5189                 devip = devInfoReg(sdp);
5190                 if (NULL == devip)
5191                         return schedule_resp(scp, NULL, errsts_no_connect, 0);
5192         }
5193         na = oip->num_attached;
5194         r_pfp = oip->pfp;
5195         if (na) {       /* multiple commands with this opcode */
5196                 r_oip = oip;
5197                 if (FF_SA & r_oip->flags) {
5198                         if (F_SA_LOW & oip->flags)
5199                                 sa = 0x1f & cmd[1];
5200                         else
5201                                 sa = get_unaligned_be16(cmd + 8);
5202                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5203                                 if (opcode == oip->opcode && sa == oip->sa)
5204                                         break;
5205                         }
5206                 } else {   /* no service action, so only the opcode is checked */
5207                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5208                                 if (opcode == oip->opcode)
5209                                         break;
5210                         }
5211                 }
5212                 if (k > na) {
5213                         if (F_SA_LOW & r_oip->flags)
5214                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5215                         else if (F_SA_HIGH & r_oip->flags)
5216                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5217                         else
5218                                 mk_sense_invalid_opcode(scp);
5219                         goto check_cond;
5220                 }
5221         }       /* else (when na==0) we assume the oip is a match */
5222         flags = oip->flags;
5223         if (F_INV_OP & flags) {
5224                 mk_sense_invalid_opcode(scp);
5225                 goto check_cond;
5226         }
5227         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5228                 if (debug)
5229                         sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: 0x%x not supported for wlun\n", opcode);
5231                 mk_sense_invalid_opcode(scp);
5232                 goto check_cond;
5233         }
5234         if (scsi_debug_strict) {        /* check cdb against mask */
5235                 u8 rem;
5236                 int j;
5237
5238                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5239                         rem = ~oip->len_mask[k] & cmd[k];
5240                         if (rem) {
5241                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5242                                         if (0x80 & rem)
5243                                                 break;
5244                                 }
5245                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5246                                 goto check_cond;
5247                         }
5248                 }
5249         }
5250         if (!(F_SKIP_UA & flags) &&
5251             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5252                 errsts = check_readiness(scp, UAS_ONLY, devip);
5253                 if (errsts)
5254                         goto check_cond;
5255         }
5256         if ((F_M_ACCESS & flags) && devip->stopped) {
5257                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5258                 if (debug)
5259                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: initializing command required\n", my_name);
5262                 errsts = check_condition_result;
5263                 goto fini;
5264         }
5265         if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5266                 goto fini;
5267         if (scsi_debug_every_nth) {
5268                 if (check_inject(scp))
5269                         return 0;       /* ignore command: make trouble */
5270         }
5271         if (oip->pfp)   /* if this command has a resp_* function, call it */
5272                 errsts = oip->pfp(scp, devip);
5273         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5274                 errsts = r_pfp(scp, devip);
5275
5276 fini:
5277         return schedule_resp(scp, devip, errsts,
5278                              ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5279 check_cond:
5280         return schedule_resp(scp, devip, check_condition_result, 0);
5281 }
5282
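     /*
      * Wrapper registered as the host template's queuecommand method. When
      * scsi_debug_host_lock is set the real dispatcher runs under
      * shost->host_lock, otherwise it runs lock free. Assuming the module
      * option follows the usual naming of the scsi_debug_* variables, e.g.
      * "modprobe scsi_debug host_lock=1" selects the locked variant.
      */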
5283 static int
5284 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5285 {
5286         if (scsi_debug_host_lock) {
5287                 unsigned long iflags;
5288                 int rc;
5289
5290                 spin_lock_irqsave(shost->host_lock, iflags);
5291                 rc = scsi_debug_queuecommand(cmd);
5292                 spin_unlock_irqrestore(shost->host_lock, iflags);
5293                 return rc;
5294         } else
5295                 return scsi_debug_queuecommand(cmd);
5296 }
5297
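     /*
      * Host template for the simulated adapter. can_queue and use_clustering
      * below are only defaults; sdebug_driver_probe() overwrites them from
      * scsi_debug_max_queue and scsi_debug_clustering before the host is
      * allocated.
      */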
5298 static struct scsi_host_template sdebug_driver_template = {
5299         .show_info =            scsi_debug_show_info,
5300         .write_info =           scsi_debug_write_info,
5301         .proc_name =            sdebug_proc_name,
5302         .name =                 "SCSI DEBUG",
5303         .info =                 scsi_debug_info,
5304         .slave_alloc =          scsi_debug_slave_alloc,
5305         .slave_configure =      scsi_debug_slave_configure,
5306         .slave_destroy =        scsi_debug_slave_destroy,
5307         .ioctl =                scsi_debug_ioctl,
5308         .queuecommand =         sdebug_queuecommand_lock_or_not,
5309         .change_queue_depth =   sdebug_change_qdepth,
5310         .eh_abort_handler =     scsi_debug_abort,
5311         .eh_device_reset_handler = scsi_debug_device_reset,
5312         .eh_target_reset_handler = scsi_debug_target_reset,
5313         .eh_bus_reset_handler = scsi_debug_bus_reset,
5314         .eh_host_reset_handler = scsi_debug_host_reset,
5315         .can_queue =            SCSI_DEBUG_CANQUEUE,
5316         .this_id =              7,
5317         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
5318         .cmd_per_lun =          DEF_CMD_PER_LUN,
5319         .max_sectors =          -1U,
5320         .use_clustering =       DISABLE_CLUSTERING,
5321         .module =               THIS_MODULE,
5322         .track_queue_depth =    1,
5323         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5324 };
5325
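     /*
      * Probe method for devices on the pseudo bus: allocate a Scsi_Host,
      * derive the advertised DIF/DIX protection capabilities and guard type
      * from scsi_debug_dif, scsi_debug_dix and scsi_debug_guard, then add
      * and scan the new host.
      */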
5326 static int sdebug_driver_probe(struct device *dev)
5327 {
5328         int error = 0;
5329         int opts;
5330         struct sdebug_host_info *sdbg_host;
5331         struct Scsi_Host *hpnt;
5332         int host_prot;
5333
5334         sdbg_host = to_sdebug_host(dev);
5335
5336         sdebug_driver_template.can_queue = scsi_debug_max_queue;
5337         if (scsi_debug_clustering)
5338                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
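             /*
              * Only a pointer back to sdbg_host is kept in the host private
              * data, hence the sizeof(sdbg_host) (pointer sized) request
              * passed to scsi_host_alloc() below.
              */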
5339         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5340         if (!hpnt) {
5341                 pr_err("%s: scsi_host_alloc failed\n", __func__);
5342                 error = -ENODEV;
5343                 return error;
5344         }
5345
5346         sdbg_host->shost = hpnt;
5347         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5348         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5349                 hpnt->max_id = scsi_debug_num_tgts + 1;
5350         else
5351                 hpnt->max_id = scsi_debug_num_tgts;
5352         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* not scsi_debug_max_luns, so the REPORT LUNS well known lun stays in range */
5353
5354         host_prot = 0;
5355
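             /* map scsi_debug_dif/scsi_debug_dix onto the SHOST_DIF_*/SHOST_DIX_* capability bits */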
5356         switch (scsi_debug_dif) {
5357
5358         case SD_DIF_TYPE1_PROTECTION:
5359                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5360                 if (scsi_debug_dix)
5361                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5362                 break;
5363
5364         case SD_DIF_TYPE2_PROTECTION:
5365                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5366                 if (scsi_debug_dix)
5367                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5368                 break;
5369
5370         case SD_DIF_TYPE3_PROTECTION:
5371                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5372                 if (scsi_debug_dix)
5373                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5374                 break;
5375
5376         default:
5377                 if (scsi_debug_dix)
5378                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5379                 break;
5380         }
5381
5382         scsi_host_set_prot(hpnt, host_prot);
5383
5384         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5385                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5386                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5387                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5388                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5389                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5390                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5391                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5392
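             /* guard option: 1 selects IP checksum protection information, otherwise T10 CRC */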
5393         if (scsi_debug_guard == 1)
5394                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5395         else
5396                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5397
5398         opts = scsi_debug_opts;
5399         if (opts & (SCSI_DEBUG_OPT_RECOVERED_ERR | SCSI_DEBUG_OPT_TRANSPORT_ERR |
5400                     SCSI_DEBUG_OPT_DIF_ERR | SCSI_DEBUG_OPT_DIX_ERR |
5401                     SCSI_DEBUG_OPT_SHORT_TRANSFER))
5402                 sdebug_any_injecting_opt = true;
5409
5410         error = scsi_add_host(hpnt, &sdbg_host->dev);
5411         if (error) {
5412                 pr_err("%s: scsi_add_host failed\n", __func__);
5413                 error = -ENODEV;
5414                 scsi_host_put(hpnt);
5415         } else
5416                 scsi_scan_host(hpnt);
5417
5418         return error;
5419 }
5420
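     /*
      * Remove method for the pseudo bus: unregister the Scsi_Host and free
      * the per-device sdebug_dev_info entries still attached to this host.
      */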
5421 static int sdebug_driver_remove(struct device *dev)
5422 {
5423         struct sdebug_host_info *sdbg_host;
5424         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5425
5426         sdbg_host = to_sdebug_host(dev);
5427
5428         if (!sdbg_host) {
5429                 pr_err("%s: Unable to locate host info\n", __func__);
5431                 return -ENODEV;
5432         }
5433
5434         scsi_remove_host(sdbg_host->shost);
5435
5436         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5437                                  dev_list) {
5438                 list_del(&sdbg_devinfo->dev_list);
5439                 kfree(sdbg_devinfo);
5440         }
5441
5442         scsi_host_put(sdbg_host->shost);
5443         return 0;
5444 }
5445
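     /* every device on the pseudo bus is handled by this driver */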
5446 static int pseudo_lld_bus_match(struct device *dev,
5447                                 struct device_driver *dev_driver)
5448 {
5449         return 1;
5450 }
5451
5452 static struct bus_type pseudo_lld_bus = {
5453         .name = "pseudo",
5454         .match = pseudo_lld_bus_match,
5455         .probe = sdebug_driver_probe,
5456         .remove = sdebug_driver_remove,
5457         .drv_groups = sdebug_drv_groups,
5458 };
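     /*
      * Additional simulated hosts are hot plugged onto this "pseudo" bus at
      * run time. Assuming the usual add_host driver attribute defined
      * elsewhere in this file and the standard sysfs layout, something like
      *   echo 1 > /sys/bus/pseudo/drivers/scsi_debug/add_host
      * creates one more host (attribute name and path are the customary
      * ones, not verified here).
      */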