drivers/scsi/scsi_debug.c
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
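/*
 * Example usage (a sketch, not exhaustive): create one pseudo host with
 * two targets, each exposing two LUNs, backed by a 16 MB ramdisk:
 *
 *     modprobe scsi_debug num_tgts=2 max_luns=2 dev_size_mb=16
 *
 * The module parameters referenced above (num_tgts, max_luns, dev_size_mb)
 * are defined further down in this file; their default values are the
 * DEF_* macros below.
 */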
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94
95 /* Additional Sense Code Qualifier (ASCQ) */
96 #define ACK_NAK_TO 0x3
97
98
99 /* Default values for driver parameters */
100 #define DEF_NUM_HOST   1
101 #define DEF_NUM_TGTS   1
102 #define DEF_MAX_LUNS   1
103 /* With these defaults, this driver will make 1 host with 1 target
104  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
105  */
106 #define DEF_ATO 1
107 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
108 #define DEF_DEV_SIZE_MB   8
109 #define DEF_DIF 0
110 #define DEF_DIX 0
111 #define DEF_D_SENSE   0
112 #define DEF_EVERY_NTH   0
113 #define DEF_FAKE_RW     0
114 #define DEF_GUARD 0
115 #define DEF_HOST_LOCK 0
116 #define DEF_LBPU 0
117 #define DEF_LBPWS 0
118 #define DEF_LBPWS10 0
119 #define DEF_LBPRZ 1
120 #define DEF_LOWEST_ALIGNED 0
121 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
122 #define DEF_NO_LUN_0   0
123 #define DEF_NUM_PARTS   0
124 #define DEF_OPTS   0
125 #define DEF_OPT_BLKS 64
126 #define DEF_PHYSBLK_EXP 0
127 #define DEF_PTYPE   0
128 #define DEF_REMOVABLE false
129 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
130 #define DEF_SECTOR_SIZE 512
131 #define DEF_UNMAP_ALIGNMENT 0
132 #define DEF_UNMAP_GRANULARITY 1
133 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
134 #define DEF_UNMAP_MAX_DESC 256
135 #define DEF_VIRTUAL_GB   0
136 #define DEF_VPD_USE_HOSTNO 1
137 #define DEF_WRITESAME_LENGTH 0xFFFF
138 #define DEF_STRICT 0
139 #define DELAY_OVERRIDDEN -9999
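/*
 * Note on the two response-delay parameters (a non-authoritative summary):
 * "delay" is in jiffies while "ndelay" is in nanoseconds. When ndelay is
 * given a value > 0 it is intended to take precedence, and delay is then
 * marked with the DELAY_OVERRIDDEN sentinel above.
 */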
140
141 /* bit mask values for scsi_debug_opts */
142 #define SCSI_DEBUG_OPT_NOISE   1
143 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
144 #define SCSI_DEBUG_OPT_TIMEOUT   4
145 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
146 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
147 #define SCSI_DEBUG_OPT_DIF_ERR   32
148 #define SCSI_DEBUG_OPT_DIX_ERR   64
149 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
150 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
151 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
152 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
153 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
154 #define SCSI_DEBUG_OPT_N_WCE    0x1000
155 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
156 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
157 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
158 /* When "every_nth" > 0 then modulo "every_nth" commands:
159  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
160  *   - a RECOVERED_ERROR is simulated on successful read and write
161  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
162  *   - a TRANSPORT_ERROR is simulated on successful read and write
163  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
164  *
165  * When "every_nth" < 0 then after "- every_nth" commands:
166  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
167  *   - a RECOVERED_ERROR is simulated on successful read and write
168  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
169  *   - a TRANSPORT_ERROR is simulated on successful read and write
170  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
171  * This will continue until some other action occurs (e.g. the user
172  * writing a new value (other than -1 or 1) to every_nth via sysfs).
173  */
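/*
 * Worked example (a sketch): loading with "opts=0x4 every_nth=100" should
 * make every 100th command simulate a timeout (SCSI_DEBUG_OPT_TIMEOUT),
 * while "opts=0x8 every_nth=10" reports a RECOVERED_ERROR on every 10th
 * successful read or write. Both values can also be changed at run time
 * through the module's sysfs parameters.
 */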
174
175 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
176  * priority order. In the subset implemented here lower numbers have higher
177  * priority. The UA numbers should be a sequence starting from 0 with
178  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
179 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
180 #define SDEBUG_UA_BUS_RESET 1
181 #define SDEBUG_UA_MODE_CHANGED 2
182 #define SDEBUG_UA_CAPACITY_CHANGED 3
183 #define SDEBUG_NUM_UAS 4
184
185 /* for check_readiness() */
186 #define UAS_ONLY 1      /* check for UAs only */
187 #define UAS_TUR 0       /* if no UAs then check if media access possible */
188
189 /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
190  * sector on read commands: */
191 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
192 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
193
194 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
195  * or "peripheral device" addressing (value 0) */
196 #define SAM2_LUN_ADDRESS_METHOD 0
197 #define SAM2_WLUN_REPORT_LUNS 0xc101
198
199 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
200  * (for response) at one time. Can be reduced by max_queue option. Command
201  * responses are not queued when delay=0 and ndelay=0. The per-device
202  * DEF_CMD_PER_LUN can be changed via sysfs:
203  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
204  * SCSI_DEBUG_CANQUEUE. */
205 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a WORD is bits in a long */
206 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
207 #define DEF_CMD_PER_LUN  255
208
209 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
210 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
211 #endif
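/*
 * Example (hypothetical device address 0:0:0:0): the per-LUN queue depth
 * mentioned above can be lowered at run time with something like
 *     echo 32 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 * but it can never exceed SCSI_DEBUG_CANQUEUE.
 */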
212
213 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
214 enum sdeb_opcode_index {
215         SDEB_I_INVALID_OPCODE = 0,
216         SDEB_I_INQUIRY = 1,
217         SDEB_I_REPORT_LUNS = 2,
218         SDEB_I_REQUEST_SENSE = 3,
219         SDEB_I_TEST_UNIT_READY = 4,
220         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
221         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
222         SDEB_I_LOG_SENSE = 7,
223         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
224         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
225         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
226         SDEB_I_START_STOP = 11,
227         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
228         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
229         SDEB_I_MAINT_IN = 14,
230         SDEB_I_MAINT_OUT = 15,
231         SDEB_I_VERIFY = 16,             /* 10 only */
232         SDEB_I_VARIABLE_LEN = 17,
233         SDEB_I_RESERVE = 18,            /* 6, 10 */
234         SDEB_I_RELEASE = 19,            /* 6, 10 */
235         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
236         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
237         SDEB_I_ATA_PT = 22,             /* 12, 16 */
238         SDEB_I_SEND_DIAG = 23,
239         SDEB_I_UNMAP = 24,
240         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
241         SDEB_I_WRITE_BUFFER = 26,
242         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
243         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
244         SDEB_I_COMP_WRITE = 29,
245         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
246 };
247
248 static const unsigned char opcode_ind_arr[256] = {
249 /* 0x0; 0x0->0x1f: 6 byte cdbs */
250         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
251             0, 0, 0, 0,
252         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
253         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
254             SDEB_I_RELEASE,
255         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
256             SDEB_I_ALLOW_REMOVAL, 0,
257 /* 0x20; 0x20->0x3f: 10 byte cdbs */
258         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
259         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
260         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
261         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
262 /* 0x40; 0x40->0x5f: 10 byte cdbs */
263         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
264         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
265         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
266             SDEB_I_RELEASE,
267         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
268 /* 0x60; 0x60->0x7d are reserved */
269         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
270         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
271         0, SDEB_I_VARIABLE_LEN,
272 /* 0x80; 0x80->0x9f: 16 byte cdbs */
273         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
274         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
275         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
276         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
277 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
278         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
279              SDEB_I_MAINT_OUT, 0, 0, 0,
280         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
281              0, 0, 0, 0,
282         0, 0, 0, 0, 0, 0, 0, 0,
283         0, 0, 0, 0, 0, 0, 0, 0,
284 /* 0xc0; 0xc0->0xff: vendor specific */
285         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
286         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
287         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
288         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
289 };
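/*
 * Decoding a cdb is a two-step table lookup (a sketch of the intended
 * flow; the actual dispatch code is further down in this file):
 *
 *     int idx = opcode_ind_arr[cmd[0]];
 *     const struct opcode_info_t *oip = &opcode_info_arr[idx];
 *
 * where cmd[0] is the SCSI opcode and opcode_info_arr[] (below) supplies
 * the response function and cdb length/mask information for that index.
 */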
290
291 #define F_D_IN                  1
292 #define F_D_OUT                 2
293 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
294 #define F_D_UNKN                8
295 #define F_RL_WLUN_OK            0x10
296 #define F_SKIP_UA               0x20
297 #define F_DELAY_OVERR           0x40
298 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
299 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
300 #define F_INV_OP                0x200
301 #define F_FAKE_RW               0x400
302 #define F_M_ACCESS              0x800   /* media access */
303
304 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
305 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
306 #define FF_SA (F_SA_HIGH | F_SA_LOW)
307
308 struct sdebug_dev_info;
309 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
310 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
311 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
312 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
313 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
314 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
315 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
316 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
329
330 struct opcode_info_t {
331         u8 num_attached;        /* 0 if this is it (i.e. a leaf); use 0xff
332                                  * for terminating element */
333         u8 opcode;              /* if num_attached > 0, preferred */
334         u16 sa;                 /* service action */
335         u32 flags;              /* OR-ed set of SDEB_F_* */
336         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
337         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
338         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
339                                 /* ignore cdb bytes after position 15 */
340 };
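/*
 * len_mask usage (a sketch, assuming strict cdb checking is enabled): a
 * set bit in len_mask[k] marks a cdb bit the command may legitimately
 * use, so reserved bits can be detected roughly like this:
 *
 *     for (k = 1; k < oip->len_mask[0] && k < 16; ++k)
 *             if (cmd[k] & ~oip->len_mask[k])
 *                     ;  // flag INVALID FIELD IN CDB at byte k
 */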
341
342 static const struct opcode_info_t msense_iarr[1] = {
343         {0, 0x1a, 0, F_D_IN, NULL, NULL,
344             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
345 };
346
347 static const struct opcode_info_t mselect_iarr[1] = {
348         {0, 0x15, 0, F_D_OUT, NULL, NULL,
349             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
350 };
351
352 static const struct opcode_info_t read_iarr[3] = {
353         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
354             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
355              0, 0, 0, 0} },
356         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
357             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
359             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
360              0xc7, 0, 0, 0, 0} },
361 };
362
363 static const struct opcode_info_t write_iarr[3] = {
364         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
365             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
366              0, 0, 0, 0} },
367         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
368             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
369         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
370             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
371              0xc7, 0, 0, 0, 0} },
372 };
373
374 static const struct opcode_info_t sa_in_iarr[1] = {
375         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
376             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
377              0xff, 0xff, 0xff, 0, 0xc7} },
378 };
379
380 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
381         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
382             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
383                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
384 };
385
386 static const struct opcode_info_t maint_in_iarr[2] = {
387         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
388             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
389              0xc7, 0, 0, 0, 0} },
390         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
391             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
392              0, 0} },
393 };
394
395 static const struct opcode_info_t write_same_iarr[1] = {
396         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
397             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
398              0xff, 0xff, 0xff, 0x1f, 0xc7} },
399 };
400
401 static const struct opcode_info_t reserve_iarr[1] = {
402         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
403             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
404 };
405
406 static const struct opcode_info_t release_iarr[1] = {
407         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
408             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
409 };
410
411
412 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
413  * plus the terminating elements for logic that scans this table such as
414  * REPORT SUPPORTED OPERATION CODES. */
415 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
416 /* 0 */
417         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
418             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
419         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
420             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
421         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
422             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
423              0, 0} },
424         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
425             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
426         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
427             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
428         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
429             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
430              0} },
431         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
432             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
433         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
434             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
435              0, 0, 0} },
436         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
437             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
438              0, 0} },
439         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
440             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
441              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
442 /* 10 */
443         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
444             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
445              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
446         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
447             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
448         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
449             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
450              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
451         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
452             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
453         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
454             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
455              0} },
456         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
457             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
458         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
459             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
460         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
461             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
462                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
463         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
464             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
465              0} },
466         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
467             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
468              0} },
469 /* 20 */
470         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
471             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
472         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
473             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
474         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
475             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
476         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
477             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
478         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
479             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
480         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
481             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
482                    0, 0, 0, 0, 0, 0} },
483         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* WRITE_BUFFER */
484             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
485         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
486             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
487                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
488         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
489             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
490              0, 0, 0, 0} },
491         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
492             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
493              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
494
495 /* 30 */
496         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
497             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
498 };
499
500 struct sdebug_scmd_extra_t {
501         bool inj_recovered;
502         bool inj_transport;
503         bool inj_dif;
504         bool inj_dix;
505         bool inj_short;
506 };
507
508 static int scsi_debug_add_host = DEF_NUM_HOST;
509 static int scsi_debug_ato = DEF_ATO;
510 static int scsi_debug_delay = DEF_DELAY;
511 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
512 static int scsi_debug_dif = DEF_DIF;
513 static int scsi_debug_dix = DEF_DIX;
514 static int scsi_debug_dsense = DEF_D_SENSE;
515 static int scsi_debug_every_nth = DEF_EVERY_NTH;
516 static int scsi_debug_fake_rw = DEF_FAKE_RW;
517 static unsigned int scsi_debug_guard = DEF_GUARD;
518 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
519 static int scsi_debug_max_luns = DEF_MAX_LUNS;
520 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
521 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
522 static int scsi_debug_ndelay = DEF_NDELAY;
523 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
524 static int scsi_debug_no_uld = 0;
525 static int scsi_debug_num_parts = DEF_NUM_PARTS;
526 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
527 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
528 static int scsi_debug_opts = DEF_OPTS;
529 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
530 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
531 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
532 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
533 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
534 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
535 static unsigned int scsi_debug_lbpu = DEF_LBPU;
536 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
537 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
538 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
539 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
540 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
541 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
542 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
543 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
544 static bool scsi_debug_removable = DEF_REMOVABLE;
545 static bool scsi_debug_clustering;
546 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
547 static bool scsi_debug_strict = DEF_STRICT;
548 static bool sdebug_any_injecting_opt;
549
550 static atomic_t sdebug_cmnd_count;
551 static atomic_t sdebug_completions;
552 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
553
554 #define DEV_READONLY(TGT)      (0)
555
556 static unsigned int sdebug_store_sectors;
557 static sector_t sdebug_capacity;        /* in sectors */
558
559 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
560    may still need them */
561 static int sdebug_heads;                /* heads per disk */
562 static int sdebug_cylinders_per;        /* cylinders per surface */
563 static int sdebug_sectors_per;          /* sectors per cylinder */
564
565 #define SDEBUG_MAX_PARTS 4
566
567 #define SCSI_DEBUG_MAX_CMD_LEN 32
568
569 static unsigned int scsi_debug_lbp(void)
570 {
571         return ((0 == scsi_debug_fake_rw) &&
572                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
573 }
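/* Non-zero only when at least one of the lbpu/lbpws/lbpws10 parameters is
 * set and fake_rw is off, i.e. logical block provisioning is only offered
 * when there is a real ramdisk store to unmap (an informal reading of the
 * expression above). */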
574
575 struct sdebug_dev_info {
576         struct list_head dev_list;
577         unsigned int channel;
578         unsigned int target;
579         u64 lun;
580         struct sdebug_host_info *sdbg_host;
581         unsigned long uas_bm[1];
582         atomic_t num_in_q;
583         char stopped;           /* TODO: should be atomic */
584         bool used;
585 };
586
587 struct sdebug_host_info {
588         struct list_head host_list;
589         struct Scsi_Host *shost;
590         struct device dev;
591         struct list_head dev_info_list;
592 };
593
594 #define to_sdebug_host(d)       \
595         container_of(d, struct sdebug_host_info, dev)
596
597 static LIST_HEAD(sdebug_host_list);
598 static DEFINE_SPINLOCK(sdebug_host_list_lock);
599
600
601 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
602         struct hrtimer hrt;     /* must be first element */
603         int qa_indx;
604 };
605
606 struct sdebug_queued_cmd {
607         /* in_use flagged by a bit in queued_in_use_bm[] */
608         struct timer_list *cmnd_timerp;
609         struct tasklet_struct *tletp;
610         struct sdebug_hrtimer *sd_hrtp;
611         struct scsi_cmnd * a_cmnd;
612 };
613 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
614 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
615
616
617 static unsigned char * fake_storep;     /* ramdisk storage */
618 static struct sd_dif_tuple *dif_storep; /* protection info */
619 static void *map_storep;                /* provisioning map */
620
621 static unsigned long map_size;
622 static int num_aborts;
623 static int num_dev_resets;
624 static int num_target_resets;
625 static int num_bus_resets;
626 static int num_host_resets;
627 static int dix_writes;
628 static int dix_reads;
629 static int dif_errors;
630
631 static DEFINE_SPINLOCK(queued_arr_lock);
632 static DEFINE_RWLOCK(atomic_rw);
633
634 static char sdebug_proc_name[] = MY_NAME;
635 static const char *my_name = MY_NAME;
636
637 static struct bus_type pseudo_lld_bus;
638
639 static struct device_driver sdebug_driverfs_driver = {
640         .name           = sdebug_proc_name,
641         .bus            = &pseudo_lld_bus,
642 };
643
644 static const int check_condition_result =
645                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
646
647 static const int illegal_condition_result =
648         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
649
650 static const int device_qfull_result =
651         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
652
653 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
654                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
655                                      0, 0, 0, 0};
656 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
657                                     0, 0, 0x2, 0x4b};
658 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
659                                    0, 0, 0x0, 0x0};
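/* The three arrays above are default contents for the caching (0x08),
 * control (0x0a) and informational exceptions control (0x1c) mode pages;
 * their first byte is the page code and the second the page length. */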
660
661 static void *fake_store(unsigned long long lba)
662 {
663         lba = do_div(lba, sdebug_store_sectors);
664
665         return fake_storep + lba * scsi_debug_sector_size;
666 }
667
668 static struct sd_dif_tuple *dif_store(sector_t sector)
669 {
670         sector = do_div(sector, sdebug_store_sectors);
671
672         return dif_storep + sector;
673 }
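/*
 * Both helpers above wrap the requested address into the ramdisk with
 * do_div(), effectively lba % sdebug_store_sectors, so a simulated
 * capacity larger than the backing store (e.g. via the virtual_gb
 * parameter) maps many LBAs onto the same stored sector (an informal
 * note, not a guarantee about aliasing behaviour).
 */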
674
675 static int sdebug_add_adapter(void);
676 static void sdebug_remove_adapter(void);
677
678 static void sdebug_max_tgts_luns(void)
679 {
680         struct sdebug_host_info *sdbg_host;
681         struct Scsi_Host *hpnt;
682
683         spin_lock(&sdebug_host_list_lock);
684         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
685                 hpnt = sdbg_host->shost;
686                 if ((hpnt->this_id >= 0) &&
687                     (scsi_debug_num_tgts > hpnt->this_id))
688                         hpnt->max_id = scsi_debug_num_tgts + 1;
689                 else
690                         hpnt->max_id = scsi_debug_num_tgts;
691                 /* scsi_debug_max_luns; */
692                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
693         }
694         spin_unlock(&sdebug_host_list_lock);
695 }
696
697 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
698
699 /* Set in_bit to -1 to indicate no bit position of invalid field */
700 static void
701 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
702                      int in_byte, int in_bit)
703 {
704         unsigned char *sbuff;
705         u8 sks[4];
706         int sl, asc;
707
708         sbuff = scp->sense_buffer;
709         if (!sbuff) {
710                 sdev_printk(KERN_ERR, scp->device,
711                             "%s: sense_buffer is NULL\n", __func__);
712                 return;
713         }
714         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
715         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
716         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
717                                 asc, 0);
718         memset(sks, 0, sizeof(sks));
719         sks[0] = 0x80;
720         if (c_d)
721                 sks[0] |= 0x40;
722         if (in_bit >= 0) {
723                 sks[0] |= 0x8;
724                 sks[0] |= 0x7 & in_bit;
725         }
726         put_unaligned_be16(in_byte, sks + 1);
727         if (scsi_debug_dsense) {
728                 sl = sbuff[7] + 8;
729                 sbuff[7] = sl;
730                 sbuff[sl] = 0x2;
731                 sbuff[sl + 1] = 0x6;
732                 memcpy(sbuff + sl + 4, sks, 3);
733         } else
734                 memcpy(sbuff + 15, sks, 3);
735         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
736                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
737                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
738                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
739 }
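/*
 * Typical call (taken from resp_inquiry() below): flag byte 1, bit 1 of
 * the cdb (the CMDDT bit) as the offending field:
 *
 *     mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
 */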
740
741 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
742 {
743         unsigned char *sbuff;
744
745         sbuff = scp->sense_buffer;
746         if (!sbuff) {
747                 sdev_printk(KERN_ERR, scp->device,
748                             "%s: sense_buffer is NULL\n", __func__);
749                 return;
750         }
751         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
752
753         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
754
755         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
756                 sdev_printk(KERN_INFO, scp->device,
757                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
758                             my_name, key, asc, asq);
759 }
760
761 static void
762 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
763 {
764         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
765 }
766
767 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
768 {
769         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
770                 if (0x1261 == cmd)
771                         sdev_printk(KERN_INFO, dev,
772                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
773                 else if (0x5331 == cmd)
774                         sdev_printk(KERN_INFO, dev,
775                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
776                                     __func__);
777                 else
778                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
779                                     __func__, cmd);
780         }
781         return -EINVAL;
782         /* return -ENOTTY; // correct return but upsets fdisk */
783 }
784
785 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
786                            struct sdebug_dev_info * devip)
787 {
788         int k;
789         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
790
791         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
792         if (k != SDEBUG_NUM_UAS) {
793                 const char *cp = NULL;
794
795                 switch (k) {
796                 case SDEBUG_UA_POR:
797                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
798                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
799                         if (debug)
800                                 cp = "power on reset";
801                         break;
802                 case SDEBUG_UA_BUS_RESET:
803                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
804                                         UA_RESET_ASC, BUS_RESET_ASCQ);
805                         if (debug)
806                                 cp = "bus reset";
807                         break;
808                 case SDEBUG_UA_MODE_CHANGED:
809                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
810                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
811                         if (debug)
812                                 cp = "mode parameters changed";
813                         break;
814                 case SDEBUG_UA_CAPACITY_CHANGED:
815                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
816                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
817                         if (debug)
818                                 cp = "capacity data changed";
819                         break;
820                 default:
821                         pr_warn("%s: unexpected unit attention code=%d\n",
822                                 __func__, k);
823                         if (debug)
824                                 cp = "unknown";
825                         break;
826                 }
827                 clear_bit(k, devip->uas_bm);
828                 if (debug)
829                         sdev_printk(KERN_INFO, SCpnt->device,
830                                    "%s reports: Unit attention: %s\n",
831                                    my_name, cp);
832                 return check_condition_result;
833         }
834         if ((UAS_TUR == uas_only) && devip->stopped) {
835                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
836                                 0x2);
837                 if (debug)
838                         sdev_printk(KERN_INFO, SCpnt->device,
839                                     "%s reports: Not ready: %s\n", my_name,
840                                     "initializing command required");
841                 return check_condition_result;
842         }
843         return 0;
844 }
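/*
 * Callers typically use check_readiness() as a gate before building a
 * response, along the lines of (a sketch):
 *
 *     errsts = check_readiness(scp, UAS_ONLY, devip);
 *     if (errsts)
 *             return errsts;
 */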
845
846 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
847 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
848                                 int arr_len)
849 {
850         int act_len;
851         struct scsi_data_buffer *sdb = scsi_in(scp);
852
853         if (!sdb->length)
854                 return 0;
855         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
856                 return (DID_ERROR << 16);
857
858         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
859                                       arr, arr_len);
860         sdb->resid = scsi_bufflen(scp) - act_len;
861
862         return 0;
863 }
864
865 /* Returns number of bytes fetched into 'arr' or -1 if error. */
866 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
867                                int arr_len)
868 {
869         if (!scsi_bufflen(scp))
870                 return 0;
871         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
872                 return -1;
873
874         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
875 }
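/* Taken together: fill_from_dev_buffer() moves a prepared response into
 * the command's data-in buffer, while fetch_to_dev_buffer() pulls the
 * data-out payload (e.g. a MODE SELECT parameter list) into a local
 * array for inspection. */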
876
877
878 static const char * inq_vendor_id = "Linux   ";
879 static const char * inq_product_id = "scsi_debug      ";
880 static const char *inq_product_rev = "0184";    /* version less '.' */
881
882 /* Device identification VPD page. Returns number of bytes placed in arr */
883 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
884                            int target_dev_id, int dev_id_num,
885                            const char * dev_id_str,
886                            int dev_id_str_len)
887 {
888         int num, port_a;
889         char b[32];
890
891         port_a = target_dev_id + 1;
892         /* T10 vendor identifier field format (faked) */
893         arr[0] = 0x2;   /* ASCII */
894         arr[1] = 0x1;
895         arr[2] = 0x0;
896         memcpy(&arr[4], inq_vendor_id, 8);
897         memcpy(&arr[12], inq_product_id, 16);
898         memcpy(&arr[28], dev_id_str, dev_id_str_len);
899         num = 8 + 16 + dev_id_str_len;
900         arr[3] = num;
901         num += 4;
902         if (dev_id_num >= 0) {
903                 /* NAA-5, Logical unit identifier (binary) */
904                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
905                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
906                 arr[num++] = 0x0;
907                 arr[num++] = 0x8;
908                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
909                 arr[num++] = 0x33;
910                 arr[num++] = 0x33;
911                 arr[num++] = 0x30;
912                 arr[num++] = (dev_id_num >> 24);
913                 arr[num++] = (dev_id_num >> 16) & 0xff;
914                 arr[num++] = (dev_id_num >> 8) & 0xff;
915                 arr[num++] = dev_id_num & 0xff;
916                 /* Target relative port number */
917                 arr[num++] = 0x61;      /* proto=sas, binary */
918                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
919                 arr[num++] = 0x0;       /* reserved */
920                 arr[num++] = 0x4;       /* length */
921                 arr[num++] = 0x0;       /* reserved */
922                 arr[num++] = 0x0;       /* reserved */
923                 arr[num++] = 0x0;
924                 arr[num++] = 0x1;       /* relative port A */
925         }
926         /* NAA-5, Target port identifier */
927         arr[num++] = 0x61;      /* proto=sas, binary */
928         arr[num++] = 0x93;      /* piv=1, target port, naa */
929         arr[num++] = 0x0;
930         arr[num++] = 0x8;
931         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
932         arr[num++] = 0x22;
933         arr[num++] = 0x22;
934         arr[num++] = 0x20;
935         arr[num++] = (port_a >> 24);
936         arr[num++] = (port_a >> 16) & 0xff;
937         arr[num++] = (port_a >> 8) & 0xff;
938         arr[num++] = port_a & 0xff;
939         /* NAA-5, Target port group identifier */
940         arr[num++] = 0x61;      /* proto=sas, binary */
941         arr[num++] = 0x95;      /* piv=1, target port group id */
942         arr[num++] = 0x0;
943         arr[num++] = 0x4;
944         arr[num++] = 0;
945         arr[num++] = 0;
946         arr[num++] = (port_group_id >> 8) & 0xff;
947         arr[num++] = port_group_id & 0xff;
948         /* NAA-5, Target device identifier */
949         arr[num++] = 0x61;      /* proto=sas, binary */
950         arr[num++] = 0xa3;      /* piv=1, target device, naa */
951         arr[num++] = 0x0;
952         arr[num++] = 0x8;
953         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
954         arr[num++] = 0x22;
955         arr[num++] = 0x22;
956         arr[num++] = 0x20;
957         arr[num++] = (target_dev_id >> 24);
958         arr[num++] = (target_dev_id >> 16) & 0xff;
959         arr[num++] = (target_dev_id >> 8) & 0xff;
960         arr[num++] = target_dev_id & 0xff;
961         /* SCSI name string: Target device identifier */
962         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
963         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
964         arr[num++] = 0x0;
965         arr[num++] = 24;
966         memcpy(arr + num, "naa.52222220", 12);
967         num += 12;
968         snprintf(b, sizeof(b), "%08X", target_dev_id);
969         memcpy(arr + num, b, 8);
970         num += 8;
971         memset(arr + num, 0, 4);
972         num += 4;
973         return num;
974 }
975
976
977 static unsigned char vpd84_data[] = {
978 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
979     0x22,0x22,0x22,0x0,0xbb,0x1,
980     0x22,0x22,0x22,0x0,0xbb,0x2,
981 };
982
983 /*  Software interface identification VPD page */
984 static int inquiry_evpd_84(unsigned char * arr)
985 {
986         memcpy(arr, vpd84_data, sizeof(vpd84_data));
987         return sizeof(vpd84_data);
988 }
989
990 /* Management network addresses VPD page */
991 static int inquiry_evpd_85(unsigned char * arr)
992 {
993         int num = 0;
994         const char * na1 = "https://www.kernel.org/config";
995         const char * na2 = "http://www.kernel.org/log";
996         int plen, olen;
997
998         arr[num++] = 0x1;       /* lu, storage config */
999         arr[num++] = 0x0;       /* reserved */
1000         arr[num++] = 0x0;
1001         olen = strlen(na1);
1002         plen = olen + 1;
1003         if (plen % 4)
1004                 plen = ((plen / 4) + 1) * 4;
1005         arr[num++] = plen;      /* length, null terminated, padded */
1006         memcpy(arr + num, na1, olen);
1007         memset(arr + num + olen, 0, plen - olen);
1008         num += plen;
1009
1010         arr[num++] = 0x4;       /* lu, logging */
1011         arr[num++] = 0x0;       /* reserved */
1012         arr[num++] = 0x0;
1013         olen = strlen(na2);
1014         plen = olen + 1;
1015         if (plen % 4)
1016                 plen = ((plen / 4) + 1) * 4;
1017         arr[num++] = plen;      /* length, null terminated, padded */
1018         memcpy(arr + num, na2, olen);
1019         memset(arr + num + olen, 0, plen - olen);
1020         num += plen;
1021
1022         return num;
1023 }
1024
1025 /* SCSI ports VPD page */
1026 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1027 {
1028         int num = 0;
1029         int port_a, port_b;
1030
1031         port_a = target_dev_id + 1;
1032         port_b = port_a + 1;
1033         arr[num++] = 0x0;       /* reserved */
1034         arr[num++] = 0x0;       /* reserved */
1035         arr[num++] = 0x0;
1036         arr[num++] = 0x1;       /* relative port 1 (primary) */
1037         memset(arr + num, 0, 6);
1038         num += 6;
1039         arr[num++] = 0x0;
1040         arr[num++] = 12;        /* length tp descriptor */
1041         /* naa-5 target port identifier (A) */
1042         arr[num++] = 0x61;      /* proto=sas, binary */
1043         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1044         arr[num++] = 0x0;       /* reserved */
1045         arr[num++] = 0x8;       /* length */
1046         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1047         arr[num++] = 0x22;
1048         arr[num++] = 0x22;
1049         arr[num++] = 0x20;
1050         arr[num++] = (port_a >> 24);
1051         arr[num++] = (port_a >> 16) & 0xff;
1052         arr[num++] = (port_a >> 8) & 0xff;
1053         arr[num++] = port_a & 0xff;
1054
1055         arr[num++] = 0x0;       /* reserved */
1056         arr[num++] = 0x0;       /* reserved */
1057         arr[num++] = 0x0;
1058         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1059         memset(arr + num, 0, 6);
1060         num += 6;
1061         arr[num++] = 0x0;
1062         arr[num++] = 12;        /* length tp descriptor */
1063         /* naa-5 target port identifier (B) */
1064         arr[num++] = 0x61;      /* proto=sas, binary */
1065         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1066         arr[num++] = 0x0;       /* reserved */
1067         arr[num++] = 0x8;       /* length */
1068         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1069         arr[num++] = 0x22;
1070         arr[num++] = 0x22;
1071         arr[num++] = 0x20;
1072         arr[num++] = (port_b >> 24);
1073         arr[num++] = (port_b >> 16) & 0xff;
1074         arr[num++] = (port_b >> 8) & 0xff;
1075         arr[num++] = port_b & 0xff;
1076
1077         return num;
1078 }
1079
1080
1081 static unsigned char vpd89_data[] = {
1082 /* from 4th byte */ 0,0,0,0,
1083 'l','i','n','u','x',' ',' ',' ',
1084 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1085 '1','2','3','4',
1086 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1087 0xec,0,0,0,
1088 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1089 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1090 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1091 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1092 0x53,0x41,
1093 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1094 0x20,0x20,
1095 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1096 0x10,0x80,
1097 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1098 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1099 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1100 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1101 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1102 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1103 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1104 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1105 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1106 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1107 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1108 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1109 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1110 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1111 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1112 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1113 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1114 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1115 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1116 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1117 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1118 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1119 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1120 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1121 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1122 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1123 };
1124
1125 /* ATA Information VPD page */
1126 static int inquiry_evpd_89(unsigned char * arr)
1127 {
1128         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1129         return sizeof(vpd89_data);
1130 }
1131
1132
1133 static unsigned char vpdb0_data[] = {
1134         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1135         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1136         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1137         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1138 };
1139
1140 /* Block limits VPD page (SBC-3) */
1141 static int inquiry_evpd_b0(unsigned char * arr)
1142 {
1143         unsigned int gran;
1144
1145         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1146
1147         /* Optimal transfer length granularity */
1148         gran = 1 << scsi_debug_physblk_exp;
1149         arr[2] = (gran >> 8) & 0xff;
1150         arr[3] = gran & 0xff;
1151
1152         /* Maximum Transfer Length */
1153         if (sdebug_store_sectors > 0x400) {
1154                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1155                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1156                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1157                 arr[7] = sdebug_store_sectors & 0xff;
1158         }
1159
1160         /* Optimal Transfer Length */
1161         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1162
1163         if (scsi_debug_lbpu) {
1164                 /* Maximum Unmap LBA Count */
1165                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1166
1167                 /* Maximum Unmap Block Descriptor Count */
1168                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1169         }
1170
1171         /* Unmap Granularity Alignment */
1172         if (scsi_debug_unmap_alignment) {
1173                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1174                 arr[28] |= 0x80; /* UGAVALID */
1175         }
1176
1177         /* Optimal Unmap Granularity */
1178         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1179
1180         /* Maximum WRITE SAME Length */
1181         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1182
1183         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1184
1185         return sizeof(vpdb0_data);
1186 }
1187
1188 /* Block device characteristics VPD page (SBC-3) */
1189 static int inquiry_evpd_b1(unsigned char *arr)
1190 {
1191         memset(arr, 0, 0x3c);
1192         arr[0] = 0;
1193         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1194         arr[2] = 0;
1195         arr[3] = 5;     /* less than 1.8" */
1196
1197         return 0x3c;
1198 }
1199
1200 /* Logical block provisioning VPD page (SBC-3) */
1201 static int inquiry_evpd_b2(unsigned char *arr)
1202 {
1203         memset(arr, 0, 0x4);
1204         arr[0] = 0;                     /* threshold exponent */
1205
1206         if (scsi_debug_lbpu)
1207                 arr[1] = 1 << 7;
1208
1209         if (scsi_debug_lbpws)
1210                 arr[1] |= 1 << 6;
1211
1212         if (scsi_debug_lbpws10)
1213                 arr[1] |= 1 << 5;
1214
1215         if (scsi_debug_lbprz)
1216                 arr[1] |= 1 << 2;
1217
1218         return 0x4;
1219 }
1220
1221 #define SDEBUG_LONG_INQ_SZ 96
1222 #define SDEBUG_MAX_INQ_ARR_SZ 584
1223
1224 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1225 {
1226         unsigned char pq_pdt;
1227         unsigned char * arr;
1228         unsigned char *cmd = scp->cmnd;
1229         int alloc_len, n, ret;
1230         bool have_wlun;
1231
1232         alloc_len = (cmd[3] << 8) + cmd[4];
1233         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1234         if (! arr)
1235                 return DID_REQUEUE << 16;
1236         have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1237         if (have_wlun)
1238                 pq_pdt = 0x1e;  /* present, wlun */
1239         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1240                 pq_pdt = 0x7f;  /* not present, no device type */
1241         else
1242                 pq_pdt = (scsi_debug_ptype & 0x1f);
1243         arr[0] = pq_pdt;
1244         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1245                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1246                 kfree(arr);
1247                 return check_condition_result;
1248         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1249                 int lu_id_num, port_group_id, target_dev_id, len;
1250                 char lu_id_str[6];
1251                 int host_no = devip->sdbg_host->shost->host_no;
1252                 
1253                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1254                     (devip->channel & 0x7f);
1255                 if (0 == scsi_debug_vpd_use_hostno)
1256                         host_no = 0;
1257                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1258                             (devip->target * 1000) + devip->lun);
1259                 target_dev_id = ((host_no + 1) * 2000) +
1260                                  (devip->target * 1000) - 3;
1261                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1262                 if (0 == cmd[2]) { /* supported vital product data pages */
1263                         arr[1] = cmd[2];        /*sanity */
1264                         n = 4;
1265                         arr[n++] = 0x0;   /* this page */
1266                         arr[n++] = 0x80;  /* unit serial number */
1267                         arr[n++] = 0x83;  /* device identification */
1268                         arr[n++] = 0x84;  /* software interface ident. */
1269                         arr[n++] = 0x85;  /* management network addresses */
1270                         arr[n++] = 0x86;  /* extended inquiry */
1271                         arr[n++] = 0x87;  /* mode page policy */
1272                         arr[n++] = 0x88;  /* SCSI ports */
1273                         arr[n++] = 0x89;  /* ATA information */
1274                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1275                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1276                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1277                                 arr[n++] = 0xb2;
1278                         arr[3] = n - 4;   /* number of supported VPD pages */
1279                 } else if (0x80 == cmd[2]) { /* unit serial number */
1280                         arr[1] = cmd[2];        /*sanity */
1281                         arr[3] = len;
1282                         memcpy(&arr[4], lu_id_str, len);
1283                 } else if (0x83 == cmd[2]) { /* device identification */
1284                         arr[1] = cmd[2];        /*sanity */
1285                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1286                                                  target_dev_id, lu_id_num,
1287                                                  lu_id_str, len);
1288                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1289                         arr[1] = cmd[2];        /*sanity */
1290                         arr[3] = inquiry_evpd_84(&arr[4]);
1291                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1292                         arr[1] = cmd[2];        /*sanity */
1293                         arr[3] = inquiry_evpd_85(&arr[4]);
1294                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1295                         arr[1] = cmd[2];        /*sanity */
1296                         arr[3] = 0x3c;  /* number of following entries */
1297                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1298                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1299                         else if (scsi_debug_dif)
1300                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1301                         else
1302                                 arr[4] = 0x0;   /* no protection stuff */
1303                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1304                 } else if (0x87 == cmd[2]) { /* mode page policy */
1305                         arr[1] = cmd[2];        /*sanity */
1306                         arr[3] = 0x8;   /* number of following entries */
1307                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1308                         arr[6] = 0x80;  /* mlus, shared */
1309                         arr[8] = 0x18;   /* protocol specific lu */
1310                         arr[10] = 0x82;  /* mlus, per initiator port */
1311                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1312                         arr[1] = cmd[2];        /*sanity */
1313                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1314                 } else if (0x89 == cmd[2]) { /* ATA information */
1315                         arr[1] = cmd[2];        /*sanity */
1316                         n = inquiry_evpd_89(&arr[4]);
1317                         arr[2] = (n >> 8);
1318                         arr[3] = (n & 0xff);
1319                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1320                         arr[1] = cmd[2];        /*sanity */
1321                         arr[3] = inquiry_evpd_b0(&arr[4]);
1322                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1323                         arr[1] = cmd[2];        /*sanity */
1324                         arr[3] = inquiry_evpd_b1(&arr[4]);
1325                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1326                         arr[1] = cmd[2];        /*sanity */
1327                         arr[3] = inquiry_evpd_b2(&arr[4]);
1328                 } else {
1329                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1330                         kfree(arr);
1331                         return check_condition_result;
1332                 }
1333                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1334                 ret = fill_from_dev_buffer(scp, arr,
1335                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1336                 kfree(arr);
1337                 return ret;
1338         }
1339         /* drops through here for a standard inquiry */
1340         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1341         arr[2] = scsi_debug_scsi_level;
1342         arr[3] = 2;    /* response_data_format==2 */
1343         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1344         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1345         if (0 == scsi_debug_vpd_use_hostno)
1346                 arr[5] |= 0x10; /* claim: implicit TPGS */
1347         arr[6] = 0x10; /* claim: MultiP */
1348         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1349         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1350         memcpy(&arr[8], inq_vendor_id, 8);
1351         memcpy(&arr[16], inq_product_id, 16);
1352         memcpy(&arr[32], inq_product_rev, 4);
1353         /* version descriptors (2 bytes each) follow */
1354         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1355         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1356         n = 62;
1357         if (scsi_debug_ptype == 0) {
1358                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1359         } else if (scsi_debug_ptype == 1) {
1360                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1361         }
1362         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1363         ret = fill_from_dev_buffer(scp, arr,
1364                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1365         kfree(arr);
1366         return ret;
1367 }
1368
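/*
 * Respond to REQUEST SENSE. If the informational exceptions control page has
 * TEST set with MRIE==6 (see iec_m_pg), a THRESHOLD EXCEEDED sense is
 * fabricated; otherwise the sense held in scp->sense_buffer is returned,
 * converted between fixed (0x70) and descriptor (0x72) format as needed to
 * match the DESC bit in the CDB.
 */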
1369 static int resp_requests(struct scsi_cmnd * scp,
1370                          struct sdebug_dev_info * devip)
1371 {
1372         unsigned char * sbuff;
1373         unsigned char *cmd = scp->cmnd;
1374         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1375         bool dsense, want_dsense;
1376         int len = 18;
1377
1378         memset(arr, 0, sizeof(arr));
1379         dsense = !!(cmd[1] & 1);
1380         want_dsense = dsense || scsi_debug_dsense;
1381         sbuff = scp->sense_buffer;
1382         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1383                 if (dsense) {
1384                         arr[0] = 0x72;
1385                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1386                         arr[2] = THRESHOLD_EXCEEDED;
1387                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1388                         len = 8;
1389                 } else {
1390                         arr[0] = 0x70;
1391                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1392                         arr[7] = 0xa;           /* 18 byte sense buffer */
1393                         arr[12] = THRESHOLD_EXCEEDED;
1394                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1395                 }
1396         } else {
1397                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1398                 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1399                         ;       /* have sense and formats match */
1400                 else if (arr[0] < 0x70) {
1401                         if (dsense) {
1402                                 memset(arr, 0, 8);
1403                                 arr[0] = 0x72;
1404                                 len = 8;
1405                         } else {
1406                                 memset(arr, 0, 18);
1407                                 arr[0] = 0x70;
1408                                 arr[7] = 0xa;
1409                         }
1410                 } else if (dsense) {
1411                         memset(arr, 0, 8);
1412                         arr[0] = 0x72;
1413                         arr[1] = sbuff[2];     /* sense key */
1414                         arr[2] = sbuff[12];    /* asc */
1415                         arr[3] = sbuff[13];    /* ascq */
1416                         len = 8;
1417                 } else {
1418                         memset(arr, 0, 18);
1419                         arr[0] = 0x70;
1420                         arr[2] = sbuff[1];     /* sense key */
1421                         arr[7] = 0xa;
1422                         arr[12] = sbuff[2];    /* asc */
1423                         arr[13] = sbuff[3];    /* ascq */
1424                 }
1425
1426         }
1427         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1428         return fill_from_dev_buffer(scp, arr, len);
1429 }
1430
1431 static int resp_start_stop(struct scsi_cmnd * scp,
1432                            struct sdebug_dev_info * devip)
1433 {
1434         unsigned char *cmd = scp->cmnd;
1435         int power_cond, start;
1436
1437         power_cond = (cmd[4] & 0xf0) >> 4;
1438         if (power_cond) {
1439                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1440                 return check_condition_result;
1441         }
1442         start = cmd[4] & 1;
1443         if (start == devip->stopped)
1444                 devip->stopped = !start;
1445         return 0;
1446 }
1447
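/*
 * Capacity in logical blocks. When the virtual_gb module parameter is
 * non-zero the advertised capacity is virtual_gb GiB regardless of the RAM
 * actually allocated; e.g. virtual_gb=1 with the default 512-byte sectors
 * gives 1073741824 / 512 = 2097152 blocks.
 */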
1448 static sector_t get_sdebug_capacity(void)
1449 {
1450         if (scsi_debug_virtual_gb > 0)
1451                 return (sector_t)scsi_debug_virtual_gb *
1452                         (1073741824 / scsi_debug_sector_size);
1453         else
1454                 return sdebug_store_sectors;
1455 }
1456
1457 #define SDEBUG_READCAP_ARR_SZ 8
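/*
 * READ CAPACITY(10): the 8-byte response carries the last LBA in bytes 0-3
 * and the block length in bytes 4-7, both big-endian. A capacity too large
 * for the 32-bit field is reported as 0xffffffff, directing the initiator to
 * use READ CAPACITY(16) instead.
 */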
1458 static int resp_readcap(struct scsi_cmnd * scp,
1459                         struct sdebug_dev_info * devip)
1460 {
1461         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1462         unsigned int capac;
1463
1464         /* following just in case virtual_gb changed */
1465         sdebug_capacity = get_sdebug_capacity();
1466         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1467         if (sdebug_capacity < 0xffffffff) {
1468                 capac = (unsigned int)sdebug_capacity - 1;
1469                 arr[0] = (capac >> 24);
1470                 arr[1] = (capac >> 16) & 0xff;
1471                 arr[2] = (capac >> 8) & 0xff;
1472                 arr[3] = capac & 0xff;
1473         } else {
1474                 arr[0] = 0xff;
1475                 arr[1] = 0xff;
1476                 arr[2] = 0xff;
1477                 arr[3] = 0xff;
1478         }
1479         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1480         arr[7] = scsi_debug_sector_size & 0xff;
1481         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1482 }
1483
1484 #define SDEBUG_READCAP16_ARR_SZ 32
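/*
 * READ CAPACITY(16) (SERVICE ACTION IN(16), service action 0x10): bytes 0-7
 * hold the last LBA and bytes 8-11 the block length; byte 12 carries the
 * protection type and enable bits, byte 13 the physical block exponent, and
 * bytes 14-15 the lowest aligned LBA together with the LBPME/LBPRZ
 * provisioning bits.
 */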
1485 static int resp_readcap16(struct scsi_cmnd * scp,
1486                           struct sdebug_dev_info * devip)
1487 {
1488         unsigned char *cmd = scp->cmnd;
1489         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1490         unsigned long long capac;
1491         int k, alloc_len;
1492
1493         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1494                      + cmd[13]);
1495         /* following just in case virtual_gb changed */
1496         sdebug_capacity = get_sdebug_capacity();
1497         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1498         capac = sdebug_capacity - 1;
1499         for (k = 0; k < 8; ++k, capac >>= 8)
1500                 arr[7 - k] = capac & 0xff;
1501         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1502         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1503         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1504         arr[11] = scsi_debug_sector_size & 0xff;
1505         arr[13] = scsi_debug_physblk_exp & 0xf;
1506         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1507
1508         if (scsi_debug_lbp()) {
1509                 arr[14] |= 0x80; /* LBPME */
1510                 if (scsi_debug_lbprz)
1511                         arr[14] |= 0x40; /* LBPRZ */
1512         }
1513
1514         arr[15] = scsi_debug_lowest_aligned & 0xff;
1515
1516         if (scsi_debug_dif) {
1517                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1518                 arr[12] |= 1; /* PROT_EN */
1519         }
1520
1521         return fill_from_dev_buffer(scp, arr,
1522                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1523 }
1524
1525 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1526
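/*
 * REPORT TARGET PORT GROUPS: a 4-byte length header followed by one
 * descriptor per port group (8 bytes plus 4 bytes per relative target port).
 * Two groups are reported to match VPD page 0x88: group A usable, group B
 * deliberately unavailable.
 */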
1527 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1528                               struct sdebug_dev_info * devip)
1529 {
1530         unsigned char *cmd = scp->cmnd;
1531         unsigned char * arr;
1532         int host_no = devip->sdbg_host->shost->host_no;
1533         int n, ret, alen, rlen;
1534         int port_group_a, port_group_b, port_a, port_b;
1535
1536         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1537                 + cmd[9]);
1538
1539         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1540         if (! arr)
1541                 return DID_REQUEUE << 16;
1542         /*
1543          * EVPD page 0x88 states we have two ports, one
1544          * real and a fake port with no device connected.
1545          * So we create two port groups with one port each
1546          * and set the group with port B to unavailable.
1547          */
1548         port_a = 0x1; /* relative port A */
1549         port_b = 0x2; /* relative port B */
1550         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1551             (devip->channel & 0x7f);
1552         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1553             (devip->channel & 0x7f) + 0x80;
1554
1555         /*
1556          * The asymmetric access state is cycled according to the host_no.
1557          */
1558         n = 4;
1559         if (0 == scsi_debug_vpd_use_hostno) {
1560             arr[n++] = host_no % 3; /* Asymm access state */
1561             arr[n++] = 0x0F; /* claim: all states are supported */
1562         } else {
1563             arr[n++] = 0x0; /* Active/Optimized path */
1564             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1565         }
1566         arr[n++] = (port_group_a >> 8) & 0xff;
1567         arr[n++] = port_group_a & 0xff;
1568         arr[n++] = 0;    /* Reserved */
1569         arr[n++] = 0;    /* Status code */
1570         arr[n++] = 0;    /* Vendor unique */
1571         arr[n++] = 0x1;  /* One port per group */
1572         arr[n++] = 0;    /* Reserved */
1573         arr[n++] = 0;    /* Reserved */
1574         arr[n++] = (port_a >> 8) & 0xff;
1575         arr[n++] = port_a & 0xff;
1576         arr[n++] = 3;    /* Port unavailable */
1577                 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1578         arr[n++] = (port_group_b >> 8) & 0xff;
1579         arr[n++] = port_group_b & 0xff;
1580         arr[n++] = 0;    /* Reserved */
1581         arr[n++] = 0;    /* Status code */
1582         arr[n++] = 0;    /* Vendor unique */
1583         arr[n++] = 0x1;  /* One port per group */
1584         arr[n++] = 0;    /* Reserved */
1585         arr[n++] = 0;    /* Reserved */
1586         arr[n++] = (port_b >> 8) & 0xff;
1587         arr[n++] = port_b & 0xff;
1588
1589         rlen = n - 4;
1590         arr[0] = (rlen >> 24) & 0xff;
1591         arr[1] = (rlen >> 16) & 0xff;
1592         arr[2] = (rlen >> 8) & 0xff;
1593         arr[3] = rlen & 0xff;
1594
1595         /*
1596          * Return the smallest of:
1597          * - the allocation length from the CDB,
1598          * - the constructed response length,
1599          * - the maximum array size.
1600          */
1601         rlen = min(alen, n);
1602         ret = fill_from_dev_buffer(scp, arr,
1603                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1604         kfree(arr);
1605         return ret;
1606 }
1607
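/*
 * REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN, service action 0xc).
 * reporting_opts 0 returns the all-commands list, one 8-byte descriptor per
 * opcode (20 bytes when RCTD asks for command timeout descriptors); values
 * 1 to 3 return the one-command form for the opcode (and, for 2 and 3, the
 * service action) given in the CDB.
 */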
1608 static int
1609 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1610 {
1611         bool rctd;
1612         u8 reporting_opts, req_opcode, sdeb_i, supp;
1613         u16 req_sa, u;
1614         u32 alloc_len, a_len;
1615         int k, offset, len, errsts, count, bump, na;
1616         const struct opcode_info_t *oip;
1617         const struct opcode_info_t *r_oip;
1618         u8 *arr;
1619         u8 *cmd = scp->cmnd;
1620
1621         rctd = !!(cmd[2] & 0x80);
1622         reporting_opts = cmd[2] & 0x7;
1623         req_opcode = cmd[3];
1624         req_sa = get_unaligned_be16(cmd + 4);
1625         alloc_len = get_unaligned_be32(cmd + 6);
1626         if (alloc_len < 4 || alloc_len > 0xffff) {
1627                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1628                 return check_condition_result;
1629         }
1630         if (alloc_len > 8192)
1631                 a_len = 8192;
1632         else
1633                 a_len = alloc_len;
1634         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1635         if (NULL == arr) {
1636                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1637                                 INSUFF_RES_ASCQ);
1638                 return check_condition_result;
1639         }
1640         switch (reporting_opts) {
1641         case 0: /* all commands */
1642                 /* count number of commands */
1643                 for (count = 0, oip = opcode_info_arr;
1644                      oip->num_attached != 0xff; ++oip) {
1645                         if (F_INV_OP & oip->flags)
1646                                 continue;
1647                         count += (oip->num_attached + 1);
1648                 }
1649                 bump = rctd ? 20 : 8;
1650                 put_unaligned_be32(count * bump, arr);
1651                 for (offset = 4, oip = opcode_info_arr;
1652                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1653                         if (F_INV_OP & oip->flags)
1654                                 continue;
1655                         na = oip->num_attached;
1656                         arr[offset] = oip->opcode;
1657                         put_unaligned_be16(oip->sa, arr + offset + 2);
1658                         if (rctd)
1659                                 arr[offset + 5] |= 0x2;
1660                         if (FF_SA & oip->flags)
1661                                 arr[offset + 5] |= 0x1;
1662                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1663                         if (rctd)
1664                                 put_unaligned_be16(0xa, arr + offset + 8);
1665                         r_oip = oip;
1666                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1667                                 if (F_INV_OP & oip->flags)
1668                                         continue;
1669                                 offset += bump;
1670                                 arr[offset] = oip->opcode;
1671                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1672                                 if (rctd)
1673                                         arr[offset + 5] |= 0x2;
1674                                 if (FF_SA & oip->flags)
1675                                         arr[offset + 5] |= 0x1;
1676                                 put_unaligned_be16(oip->len_mask[0],
1677                                                    arr + offset + 6);
1678                                 if (rctd)
1679                                         put_unaligned_be16(0xa,
1680                                                            arr + offset + 8);
1681                         }
1682                         oip = r_oip;
1683                         offset += bump;
1684                 }
1685                 break;
1686         case 1: /* one command: opcode only */
1687         case 2: /* one command: opcode plus service action */
1688         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1689                 sdeb_i = opcode_ind_arr[req_opcode];
1690                 oip = &opcode_info_arr[sdeb_i];
1691                 if (F_INV_OP & oip->flags) {
1692                         supp = 1;
1693                         offset = 4;
1694                 } else {
1695                         if (1 == reporting_opts) {
1696                                 if (FF_SA & oip->flags) {
1697                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1698                                                              2, 2);
1699                                         kfree(arr);
1700                                         return check_condition_result;
1701                                 }
1702                                 req_sa = 0;
1703                         } else if (2 == reporting_opts &&
1704                                    0 == (FF_SA & oip->flags)) {
1705                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1); /* point at requested sa */
1706                                 kfree(arr);
1707                                 return check_condition_result;
1708                         }
1709                         if (0 == (FF_SA & oip->flags) &&
1710                             req_opcode == oip->opcode)
1711                                 supp = 3;
1712                         else if (0 == (FF_SA & oip->flags)) {
1713                                 na = oip->num_attached;
1714                                 for (k = 0, oip = oip->arrp; k < na;
1715                                      ++k, ++oip) {
1716                                         if (req_opcode == oip->opcode)
1717                                                 break;
1718                                 }
1719                                 supp = (k >= na) ? 1 : 3;
1720                         } else if (req_sa != oip->sa) {
1721                                 na = oip->num_attached;
1722                                 for (k = 0, oip = oip->arrp; k < na;
1723                                      ++k, ++oip) {
1724                                         if (req_sa == oip->sa)
1725                                                 break;
1726                                 }
1727                                 supp = (k >= na) ? 1 : 3;
1728                         } else
1729                                 supp = 3;
1730                         if (3 == supp) {
1731                                 u = oip->len_mask[0];
1732                                 put_unaligned_be16(u, arr + 2);
1733                                 arr[4] = oip->opcode;
1734                                 for (k = 1; k < u; ++k)
1735                                         arr[4 + k] = (k < 16) ?
1736                                                  oip->len_mask[k] : 0xff;
1737                                 offset = 4 + u;
1738                         } else
1739                                 offset = 4;
1740                 }
1741                 arr[1] = (rctd ? 0x80 : 0) | supp;
1742                 if (rctd) {
1743                         put_unaligned_be16(0xa, arr + offset);
1744                         offset += 12;
1745                 }
1746                 break;
1747         default:
1748                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1749                 kfree(arr);
1750                 return check_condition_result;
1751         }
1752         offset = (offset < a_len) ? offset : a_len;
1753         len = (offset < alloc_len) ? offset : alloc_len;
1754         errsts = fill_from_dev_buffer(scp, arr, len);
1755         kfree(arr);
1756         return errsts;
1757 }
1758
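/*
 * REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (MAINTENANCE IN, service action
 * 0xd). Byte 0 advertises ABORT TASK, ABORT TASK SET and LOGICAL UNIT RESET
 * (0xc8); the 16-byte extended format is returned when the REPD bit is set.
 */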
1759 static int
1760 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1761 {
1762         bool repd;
1763         u32 alloc_len, len;
1764         u8 arr[16];
1765         u8 *cmd = scp->cmnd;
1766
1767         memset(arr, 0, sizeof(arr));
1768         repd = !!(cmd[2] & 0x80);
1769         alloc_len = get_unaligned_be32(cmd + 6);
1770         if (alloc_len < 4) {
1771                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1772                 return check_condition_result;
1773         }
1774         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1775         arr[1] = 0x1;           /* ITNRS */
1776         if (repd) {
1777                 arr[3] = 0xc;
1778                 len = 16;
1779         } else
1780                 len = 4;
1781
1782         len = (len < alloc_len) ? len : alloc_len;
1783         return fill_from_dev_buffer(scp, arr, len);
1784 }
1785
1786 /* <<Following mode page info copied from ST318451LW>> */
1787
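/*
 * Each resp_*_pg() helper below copies a canned mode page into the supplied
 * buffer and returns its length. The pcontrol argument follows the MODE
 * SENSE PC field: 0 reports current values, 1 the changeable mask (with
 * non-changeable bytes zeroed), 2 the defaults; saved values (3) are
 * rejected before these helpers are reached.
 */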
1788 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1789 {       /* Read-Write Error Recovery page for mode_sense */
1790         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1791                                         5, 0, 0xff, 0xff};
1792
1793         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1794         if (1 == pcontrol)
1795                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1796         return sizeof(err_recov_pg);
1797 }
1798
1799 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1800 {       /* Disconnect-Reconnect page for mode_sense */
1801         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1802                                          0, 0, 0, 0, 0, 0, 0, 0};
1803
1804         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1805         if (1 == pcontrol)
1806                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1807         return sizeof(disconnect_pg);
1808 }
1809
1810 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1811 {       /* Format device page for mode_sense */
1812         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1813                                      0, 0, 0, 0, 0, 0, 0, 0,
1814                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1815
1816         memcpy(p, format_pg, sizeof(format_pg));
1817         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1818         p[11] = sdebug_sectors_per & 0xff;
1819         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1820         p[13] = scsi_debug_sector_size & 0xff;
1821         if (scsi_debug_removable)
1822                 p[20] |= 0x20; /* should agree with INQUIRY */
1823         if (1 == pcontrol)
1824                 memset(p + 2, 0, sizeof(format_pg) - 2);
1825         return sizeof(format_pg);
1826 }
1827
1828 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1829 {       /* Caching page for mode_sense */
1830         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1831                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1832         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1833                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1834
1835         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1836                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1837         memcpy(p, caching_pg, sizeof(caching_pg));
1838         if (1 == pcontrol)
1839                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1840         else if (2 == pcontrol)
1841                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1842         return sizeof(caching_pg);
1843 }
1844
1845 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1846 {       /* Control mode page for mode_sense */
1847         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1848                                         0, 0, 0, 0};
1849         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1850                                      0, 0, 0x2, 0x4b};
1851
1852         if (scsi_debug_dsense)
1853                 ctrl_m_pg[2] |= 0x4;
1854         else
1855                 ctrl_m_pg[2] &= ~0x4;
1856
1857         if (scsi_debug_ato)
1858                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1859
1860         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1861         if (1 == pcontrol)
1862                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1863         else if (2 == pcontrol)
1864                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1865         return sizeof(ctrl_m_pg);
1866 }
1867
1868
1869 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1870 {       /* Informational Exceptions control mode page for mode_sense */
1871         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1872                                        0, 0, 0x0, 0x0};
1873         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1874                                       0, 0, 0x0, 0x0};
1875
1876         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1877         if (1 == pcontrol)
1878                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1879         else if (2 == pcontrol)
1880                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1881         return sizeof(iec_m_pg);
1882 }
1883
1884 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1885 {       /* SAS SSP mode page - short format for mode_sense */
1886         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1887                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1888
1889         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1890         if (1 == pcontrol)
1891                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1892         return sizeof(sas_sf_m_pg);
1893 }
1894
1895
1896 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1897                               int target_dev_id)
1898 {       /* SAS phy control and discover mode page for mode_sense */
1899         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1900                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1901                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1902                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1903                     0x2, 0, 0, 0, 0, 0, 0, 0,
1904                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1905                     0, 0, 0, 0, 0, 0, 0, 0,
1906                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1907                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1908                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1909                     0x3, 0, 0, 0, 0, 0, 0, 0,
1910                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1911                     0, 0, 0, 0, 0, 0, 0, 0,
1912                 };
1913         int port_a, port_b;
1914
1915         port_a = target_dev_id + 1;
1916         port_b = port_a + 1;
1917         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1918         p[20] = (port_a >> 24);
1919         p[21] = (port_a >> 16) & 0xff;
1920         p[22] = (port_a >> 8) & 0xff;
1921         p[23] = port_a & 0xff;
1922         p[48 + 20] = (port_b >> 24);
1923         p[48 + 21] = (port_b >> 16) & 0xff;
1924         p[48 + 22] = (port_b >> 8) & 0xff;
1925         p[48 + 23] = port_b & 0xff;
1926         if (1 == pcontrol)
1927                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1928         return sizeof(sas_pcd_m_pg);
1929 }
1930
1931 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1932 {       /* SAS SSP shared protocol specific port mode subpage */
1933         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1934                     0, 0, 0, 0, 0, 0, 0, 0,
1935                 };
1936
1937         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1938         if (1 == pcontrol)
1939                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1940         return sizeof(sas_sha_m_pg);
1941 }
1942
1943 #define SDEBUG_MAX_MSENSE_SZ 256
1944
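/*
 * MODE SENSE(6) and MODE SENSE(10). The response is a mode parameter header
 * (4 or 8 bytes), an optional short or long-LBA block descriptor for disk
 * types when DBD is clear, and then the requested page(s); page code 0x3f
 * collects all pages, with subpage 0xff also pulling in the SAS subpages.
 */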
1945 static int
1946 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1947 {
1948         unsigned char dbd, llbaa;
1949         int pcontrol, pcode, subpcode, bd_len;
1950         unsigned char dev_spec;
1951         int k, alloc_len, msense_6, offset, len, target_dev_id;
1952         int target = scp->device->id;
1953         unsigned char * ap;
1954         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1955         unsigned char *cmd = scp->cmnd;
1956
1957         dbd = !!(cmd[1] & 0x8);
1958         pcontrol = (cmd[2] & 0xc0) >> 6;
1959         pcode = cmd[2] & 0x3f;
1960         subpcode = cmd[3];
1961         msense_6 = (MODE_SENSE == cmd[0]);
1962         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1963         if ((0 == scsi_debug_ptype) && (0 == dbd))
1964                 bd_len = llbaa ? 16 : 8;
1965         else
1966                 bd_len = 0;
1967         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1968         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1969         if (0x3 == pcontrol) {  /* Saving values not supported */
1970                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
1971                 return check_condition_result;
1972         }
1973         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1974                         (devip->target * 1000) - 3;
1975         /* set DPOFUA bit for disks */
1976         if (0 == scsi_debug_ptype)
1977                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1978         else
1979                 dev_spec = 0x0;
1980         if (msense_6) {
1981                 arr[2] = dev_spec;
1982                 arr[3] = bd_len;
1983                 offset = 4;
1984         } else {
1985                 arr[3] = dev_spec;
1986                 if (16 == bd_len)
1987                         arr[4] = 0x1;   /* set LONGLBA bit */
1988                 arr[7] = bd_len;        /* assume 255 or less */
1989                 offset = 8;
1990         }
1991         ap = arr + offset;
1992         if ((bd_len > 0) && (!sdebug_capacity))
1993                 sdebug_capacity = get_sdebug_capacity();
1994
1995         if (8 == bd_len) {
1996                 if (sdebug_capacity > 0xfffffffe) {
1997                         ap[0] = 0xff;
1998                         ap[1] = 0xff;
1999                         ap[2] = 0xff;
2000                         ap[3] = 0xff;
2001                 } else {
2002                         ap[0] = (sdebug_capacity >> 24) & 0xff;
2003                         ap[1] = (sdebug_capacity >> 16) & 0xff;
2004                         ap[2] = (sdebug_capacity >> 8) & 0xff;
2005                         ap[3] = sdebug_capacity & 0xff;
2006                 }
2007                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2008                 ap[7] = scsi_debug_sector_size & 0xff;
2009                 offset += bd_len;
2010                 ap = arr + offset;
2011         } else if (16 == bd_len) {
2012                 unsigned long long capac = sdebug_capacity;
2013
2014                 for (k = 0; k < 8; ++k, capac >>= 8)
2015                         ap[7 - k] = capac & 0xff;
2016                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2017                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2018                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2019                 ap[15] = scsi_debug_sector_size & 0xff;
2020                 offset += bd_len;
2021                 ap = arr + offset;
2022         }
2023
2024         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2025                 /* TODO: Control Extension page */
2026                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2027                 return check_condition_result;
2028         }
2029         switch (pcode) {
2030         case 0x1:       /* Read-Write error recovery page, direct access */
2031                 len = resp_err_recov_pg(ap, pcontrol, target);
2032                 offset += len;
2033                 break;
2034         case 0x2:       /* Disconnect-Reconnect page, all devices */
2035                 len = resp_disconnect_pg(ap, pcontrol, target);
2036                 offset += len;
2037                 break;
2038         case 0x3:       /* Format device page, direct access */
2039                 len = resp_format_pg(ap, pcontrol, target);
2040                 offset += len;
2041                 break;
2042         case 0x8:       /* Caching page, direct access */
2043                 len = resp_caching_pg(ap, pcontrol, target);
2044                 offset += len;
2045                 break;
2046         case 0xa:       /* Control Mode page, all devices */
2047                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2048                 offset += len;
2049                 break;
2050         case 0x19:      /* if spc==1 then sas phy, control+discover */
2051                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2052                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2053                         return check_condition_result;
2054                 }
2055                 len = 0;
2056                 if ((0x0 == subpcode) || (0xff == subpcode))
2057                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2058                 if ((0x1 == subpcode) || (0xff == subpcode))
2059                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2060                                                   target_dev_id);
2061                 if ((0x2 == subpcode) || (0xff == subpcode))
2062                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2063                 offset += len;
2064                 break;
2065         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2066                 len = resp_iec_m_pg(ap, pcontrol, target);
2067                 offset += len;
2068                 break;
2069         case 0x3f:      /* Read all Mode pages */
2070                 if ((0 == subpcode) || (0xff == subpcode)) {
2071                         len = resp_err_recov_pg(ap, pcontrol, target);
2072                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2073                         len += resp_format_pg(ap + len, pcontrol, target);
2074                         len += resp_caching_pg(ap + len, pcontrol, target);
2075                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2076                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2077                         if (0xff == subpcode) {
2078                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2079                                                   target, target_dev_id);
2080                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2081                         }
2082                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2083                 } else {
2084                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2085                         return check_condition_result;
2086                 }
2087                 offset += len;
2088                 break;
2089         default:
2090                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2091                 return check_condition_result;
2092         }
2093         if (msense_6)
2094                 arr[0] = offset - 1;
2095         else {
2096                 arr[0] = ((offset - 2) >> 8) & 0xff;
2097                 arr[1] = (offset - 2) & 0xff;
2098         }
2099         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2100 }
2101
2102 #define SDEBUG_MAX_MSELECT_SZ 512
2103
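/*
 * MODE SELECT(6)/(10): the parameter list is fetched from the data-out
 * buffer, the header and any block descriptor are skipped, and only the
 * caching (0x8), control (0xa) and informational exceptions (0x1c) pages may
 * be changed; a successful change raises a MODE PARAMETERS CHANGED unit
 * attention on the device.
 */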
2104 static int
2105 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2106 {
2107         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2108         int param_len, res, mpage;
2109         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2110         unsigned char *cmd = scp->cmnd;
2111         int mselect6 = (MODE_SELECT == cmd[0]);
2112
2113         memset(arr, 0, sizeof(arr));
2114         pf = cmd[1] & 0x10;
2115         sp = cmd[1] & 0x1;
2116         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2117         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2118                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2119                 return check_condition_result;
2120         }
2121         res = fetch_to_dev_buffer(scp, arr, param_len);
2122         if (-1 == res)
2123                 return (DID_ERROR << 16);
2124         else if ((res < param_len) &&
2125                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2126                 sdev_printk(KERN_INFO, scp->device,
2127                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2128                             __func__, param_len, res);
2129         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2130         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2131         if (md_len > 2) {
2132                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2133                 return check_condition_result;
2134         }
2135         off = bd_len + (mselect6 ? 4 : 8);
2136         mpage = arr[off] & 0x3f;
2137         ps = !!(arr[off] & 0x80);
2138         if (ps) {
2139                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2140                 return check_condition_result;
2141         }
2142         spf = !!(arr[off] & 0x40);
2143         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2144                        (arr[off + 1] + 2);
2145         if ((pg_len + off) > param_len) {
2146                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2147                                 PARAMETER_LIST_LENGTH_ERR, 0);
2148                 return check_condition_result;
2149         }
2150         switch (mpage) {
2151         case 0x8:      /* Caching Mode page */
2152                 if (caching_pg[1] == arr[off + 1]) {
2153                         memcpy(caching_pg + 2, arr + off + 2,
2154                                sizeof(caching_pg) - 2);
2155                         goto set_mode_changed_ua;
2156                 }
2157                 break;
2158         case 0xa:      /* Control Mode page */
2159                 if (ctrl_m_pg[1] == arr[off + 1]) {
2160                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2161                                sizeof(ctrl_m_pg) - 2);
2162                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2163                         goto set_mode_changed_ua;
2164                 }
2165                 break;
2166         case 0x1c:      /* Informational Exceptions Mode page */
2167                 if (iec_m_pg[1] == arr[off + 1]) {
2168                         memcpy(iec_m_pg + 2, arr + off + 2,
2169                                sizeof(iec_m_pg) - 2);
2170                         goto set_mode_changed_ua;
2171                 }
2172                 break;
2173         default:
2174                 break;
2175         }
2176         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2177         return check_condition_result;
2178 set_mode_changed_ua:
2179         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2180         return 0;
2181 }
2182
2183 static int resp_temp_l_pg(unsigned char * arr)
2184 {
2185         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2186                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2187                 };
2188
2189         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2190         return sizeof(temp_l_pg);
2191 }
2192
2193 static int resp_ie_l_pg(unsigned char * arr)
2194 {
2195         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2196                 };
2197
2198         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2199         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2200                 arr[4] = THRESHOLD_EXCEEDED;
2201                 arr[5] = 0xff;
2202         }
2203         return sizeof(ie_l_pg);
2204 }
2205
2206 #define SDEBUG_MAX_LSENSE_SZ 512
2207
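/*
 * LOG SENSE: only the supported-pages (0x0), temperature (0xd) and
 * informational exceptions (0x2f) log pages are implemented, plus the
 * corresponding subpage lists when subpage 0xff is requested; the PPC and SP
 * bits are rejected.
 */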
2208 static int resp_log_sense(struct scsi_cmnd * scp,
2209                           struct sdebug_dev_info * devip)
2210 {
2211         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2212         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2213         unsigned char *cmd = scp->cmnd;
2214
2215         memset(arr, 0, sizeof(arr));
2216         ppc = cmd[1] & 0x2;
2217         sp = cmd[1] & 0x1;
2218         if (ppc || sp) {
2219                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2220                 return check_condition_result;
2221         }
2222         pcontrol = (cmd[2] & 0xc0) >> 6;
2223         pcode = cmd[2] & 0x3f;
2224         subpcode = cmd[3] & 0xff;
2225         alloc_len = (cmd[7] << 8) + cmd[8];
2226         arr[0] = pcode;
2227         if (0 == subpcode) {
2228                 switch (pcode) {
2229                 case 0x0:       /* Supported log pages log page */
2230                         n = 4;
2231                         arr[n++] = 0x0;         /* this page */
2232                         arr[n++] = 0xd;         /* Temperature */
2233                         arr[n++] = 0x2f;        /* Informational exceptions */
2234                         arr[3] = n - 4;
2235                         break;
2236                 case 0xd:       /* Temperature log page */
2237                         arr[3] = resp_temp_l_pg(arr + 4);
2238                         break;
2239                 case 0x2f:      /* Informational exceptions log page */
2240                         arr[3] = resp_ie_l_pg(arr + 4);
2241                         break;
2242                 default:
2243                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2244                         return check_condition_result;
2245                 }
2246         } else if (0xff == subpcode) {
2247                 arr[0] |= 0x40;
2248                 arr[1] = subpcode;
2249                 switch (pcode) {
2250                 case 0x0:       /* Supported log pages and subpages log page */
2251                         n = 4;
2252                         arr[n++] = 0x0;
2253                         arr[n++] = 0x0;         /* 0,0 page */
2254                         arr[n++] = 0x0;
2255                         arr[n++] = 0xff;        /* this page */
2256                         arr[n++] = 0xd;
2257                         arr[n++] = 0x0;         /* Temperature */
2258                         arr[n++] = 0x2f;
2259                         arr[n++] = 0x0; /* Informational exceptions */
2260                         arr[3] = n - 4;
2261                         break;
2262                 case 0xd:       /* Temperature subpages */
2263                         n = 4;
2264                         arr[n++] = 0xd;
2265                         arr[n++] = 0x0;         /* Temperature */
2266                         arr[3] = n - 4;
2267                         break;
2268                 case 0x2f:      /* Informational exceptions subpages */
2269                         n = 4;
2270                         arr[n++] = 0x2f;
2271                         arr[n++] = 0x0;         /* Informational exceptions */
2272                         arr[3] = n - 4;
2273                         break;
2274                 default:
2275                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2276                         return check_condition_result;
2277                 }
2278         } else {
2279                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2280                 return check_condition_result;
2281         }
2282         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2283         return fill_from_dev_buffer(scp, arr,
2284                     min(len, SDEBUG_MAX_LSENSE_SZ));
2285 }
2286
2287 static int check_device_access_params(struct scsi_cmnd *scp,
2288                                       unsigned long long lba, unsigned int num)
2289 {
2290         if (lba + num > sdebug_capacity) {
2291                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2292                 return check_condition_result;
2293         }
2294         /* transfer length excessive (tie in to block limits VPD page) */
2295         if (num > sdebug_store_sectors) {
2296                 /* needs work to find which cdb byte 'num' comes from */
2297                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2298                 return check_condition_result;
2299         }
2300         return 0;
2301 }
2302
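/*
 * The simulated medium is one RAM buffer of sdebug_store_sectors blocks
 * shared by every simulated device, so LBAs are reduced modulo the store
 * size and a transfer that runs off the end wraps to the start. For example
 * (illustration only), with a 2048-block store a 16-block access at LBA 2040
 * touches the last 8 blocks of the buffer and then the first 8.
 */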
2303 /* Returns number of bytes copied or -1 if error. */
2304 static int
2305 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2306 {
2307         int ret;
2308         u64 block, rest = 0;
2309         struct scsi_data_buffer *sdb;
2310         enum dma_data_direction dir;
2311         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2312                        off_t);
2313
2314         if (do_write) {
2315                 sdb = scsi_out(scmd);
2316                 dir = DMA_TO_DEVICE;
2317                 func = sg_pcopy_to_buffer;
2318         } else {
2319                 sdb = scsi_in(scmd);
2320                 dir = DMA_FROM_DEVICE;
2321                 func = sg_pcopy_from_buffer;
2322         }
2323
2324         if (!sdb->length)
2325                 return 0;
2326         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2327                 return -1;
2328
2329         block = do_div(lba, sdebug_store_sectors);
2330         if (block + num > sdebug_store_sectors)
2331                 rest = block + num - sdebug_store_sectors;
2332
2333         ret = func(sdb->table.sgl, sdb->table.nents,
2334                    fake_storep + (block * scsi_debug_sector_size),
2335                    (num - rest) * scsi_debug_sector_size, 0);
2336         if (ret != (num - rest) * scsi_debug_sector_size)
2337                 return ret;
2338
2339         if (rest) {
2340                 ret += func(sdb->table.sgl, sdb->table.nents,
2341                             fake_storep, rest * scsi_debug_sector_size,
2342                             (num - rest) * scsi_debug_sector_size);
2343         }
2344
2345         return ret;
2346 }
2347
2348 /* If the first half of arr (the verify data) compares equal to
2349  * fake_store(lba,num), copy the second half of arr (the write data) into
2350  * fake_store(lba,num) and return true. If the comparison fails, return false. */
2351 static bool
2352 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2353 {
2354         bool res;
2355         u64 block, rest = 0;
2356         u32 store_blks = sdebug_store_sectors;
2357         u32 lb_size = scsi_debug_sector_size;
2358
2359         block = do_div(lba, store_blks);
2360         if (block + num > store_blks)
2361                 rest = block + num - store_blks;
2362
2363         res = !memcmp(fake_storep + (block * lb_size), arr,
2364                       (num - rest) * lb_size);
2365         if (!res)
2366                 return res;
2367         if (rest)
2368                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2369                               rest * lb_size);
2370         if (!res)
2371                 return res;
2372         arr += num * lb_size;
2373         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2374         if (rest)
2375                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2376                        rest * lb_size);
2377         return res;
2378 }
2379
2380 static __be16 dif_compute_csum(const void *buf, int len)
2381 {
2382         __be16 csum;
2383
2384         if (scsi_debug_guard)
2385                 csum = (__force __be16)ip_compute_csum(buf, len);
2386         else
2387                 csum = cpu_to_be16(crc_t10dif(buf, len));
2388
2389         return csum;
2390 }
2391
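/*
 * Check one 8-byte protection information tuple against its data block: the
 * guard tag is an IP checksum or a T10 CRC depending on scsi_debug_guard,
 * and the reference tag must match the low 32 bits of the LBA for type 1
 * protection or the expected initial LBA for type 2. The non-zero return
 * values (0x01, 0x03) are used as the ASCQ reported with ABORTED COMMAND.
 */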
2392 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2393                       sector_t sector, u32 ei_lba)
2394 {
2395         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2396
2397         if (sdt->guard_tag != csum) {
2398                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2399                         __func__,
2400                         (unsigned long)sector,
2401                         be16_to_cpu(sdt->guard_tag),
2402                         be16_to_cpu(csum));
2403                 return 0x01;
2404         }
2405         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2406             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2407                 pr_err("%s: REF check failed on sector %lu\n",
2408                         __func__, (unsigned long)sector);
2409                 return 0x03;
2410         }
2411         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2412             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2413                 pr_err("%s: REF check failed on sector %lu\n",
2414                         __func__, (unsigned long)sector);
2415                 return 0x03;
2416         }
2417         return 0;
2418 }
2419
2420 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2421                           unsigned int sectors, bool read)
2422 {
2423         size_t resid;
2424         void *paddr;
2425         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2426         struct sg_mapping_iter miter;
2427
2428         /* Bytes of protection data to copy into sgl */
2429         resid = sectors * sizeof(*dif_storep);
2430
2431         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2432                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2433                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2434
2435         while (sg_miter_next(&miter) && resid > 0) {
2436                 size_t len = min(miter.length, resid);
2437                 void *start = dif_store(sector);
2438                 size_t rest = 0;
2439
2440                 if (dif_store_end < start + len)
2441                         rest = start + len - dif_store_end;
2442
2443                 paddr = miter.addr;
2444
2445                 if (read)
2446                         memcpy(paddr, start, len - rest);
2447                 else
2448                         memcpy(start, paddr, len - rest);
2449
2450                 if (rest) {
2451                         if (read)
2452                                 memcpy(paddr + len - rest, dif_storep, rest);
2453                         else
2454                                 memcpy(dif_storep, paddr + len - rest, rest);
2455                 }
2456
2457                 sector += len / sizeof(*dif_storep);
2458                 resid -= len;
2459         }
2460         sg_miter_stop(&miter);
2461 }
2462
2463 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2464                             unsigned int sectors, u32 ei_lba)
2465 {
2466         unsigned int i;
2467         struct sd_dif_tuple *sdt;
2468         sector_t sector;
2469
2470         for (i = 0; i < sectors; i++, ei_lba++) {
2471                 int ret;
2472
2473                 sector = start_sec + i;
2474                 sdt = dif_store(sector);
2475
2476                 if (sdt->app_tag == cpu_to_be16(0xffff))
2477                         continue;
2478
2479                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2480                 if (ret) {
2481                         dif_errors++;
2482                         return ret;
2483                 }
2484         }
2485
2486         dif_copy_prot(SCpnt, start_sec, sectors, true);
2487         dix_reads++;
2488
2489         return 0;
2490 }
2491
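/*
 * READ(6/10/12/16/32) and the read half of XDWRITEREAD(10). The CDB is
 * decoded into lba/num/ei_lba, protection bits are sanity checked for
 * DIF-formatted devices, optional error injection is applied, and the data
 * (plus protection information when DIX is active) is copied out of the RAM
 * store under the atomic_rw read lock.
 */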
2492 static int
2493 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2494 {
2495         u8 *cmd = scp->cmnd;
2496         u64 lba;
2497         u32 num;
2498         u32 ei_lba;
2499         unsigned long iflags;
2500         int ret;
2501         bool check_prot;
2502
2503         switch (cmd[0]) {
2504         case READ_16:
2505                 ei_lba = 0;
2506                 lba = get_unaligned_be64(cmd + 2);
2507                 num = get_unaligned_be32(cmd + 10);
2508                 check_prot = true;
2509                 break;
2510         case READ_10:
2511                 ei_lba = 0;
2512                 lba = get_unaligned_be32(cmd + 2);
2513                 num = get_unaligned_be16(cmd + 7);
2514                 check_prot = true;
2515                 break;
2516         case READ_6:
2517                 ei_lba = 0;
2518                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2519                       (u32)(cmd[1] & 0x1f) << 16;
2520                 num = (0 == cmd[4]) ? 256 : cmd[4];
2521                 check_prot = true;
2522                 break;
2523         case READ_12:
2524                 ei_lba = 0;
2525                 lba = get_unaligned_be32(cmd + 2);
2526                 num = get_unaligned_be32(cmd + 6);
2527                 check_prot = true;
2528                 break;
2529         case XDWRITEREAD_10:
2530                 ei_lba = 0;
2531                 lba = get_unaligned_be32(cmd + 2);
2532                 num = get_unaligned_be16(cmd + 7);
2533                 check_prot = false;
2534                 break;
2535         default:        /* assume READ(32) */
2536                 lba = get_unaligned_be64(cmd + 12);
2537                 ei_lba = get_unaligned_be32(cmd + 20);
2538                 num = get_unaligned_be32(cmd + 28);
2539                 check_prot = false;
2540                 break;
2541         }
2542         if (check_prot) {
2543                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2544                     (cmd[1] & 0xe0)) {
2545                         mk_sense_invalid_opcode(scp);
2546                         return check_condition_result;
2547                 }
2548                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2549                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2550                     (cmd[1] & 0xe0) == 0)
2551                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2552                                     "to DIF device\n");
2553         }
2554         if (sdebug_any_injecting_opt) {
2555                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2556
2557                 if (ep->inj_short)
2558                         num /= 2;
2559         }
2560
2561         /* inline check_device_access_params() */
2562         if (lba + num > sdebug_capacity) {
2563                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2564                 return check_condition_result;
2565         }
2566         /* transfer length excessive (tie in to block limits VPD page) */
2567         if (num > sdebug_store_sectors) {
2568                 /* needs work to find which cdb byte 'num' comes from */
2569                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2570                 return check_condition_result;
2571         }
2572
2573         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2574             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2575             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2576                 /* claim unrecoverable read error */
2577                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2578                 /* set info field and valid bit for fixed descriptor */
2579                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2580                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2581                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2582                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2583                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2584                 }
2585                 scsi_set_resid(scp, scsi_bufflen(scp));
2586                 return check_condition_result;
2587         }
2588
2589         read_lock_irqsave(&atomic_rw, iflags);
2590
2591         /* DIX + T10 DIF */
2592         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2593                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2594
2595                 if (prot_ret) {
2596                         read_unlock_irqrestore(&atomic_rw, iflags);
2597                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2598                         return illegal_condition_result;
2599                 }
2600         }
2601
2602         ret = do_device_access(scp, lba, num, false);
2603         read_unlock_irqrestore(&atomic_rw, iflags);
2604         if (ret == -1)
2605                 return DID_ERROR << 16;
2606
2607         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2608
2609         if (sdebug_any_injecting_opt) {
2610                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2611
2612                 if (ep->inj_recovered) {
2613                         mk_sense_buffer(scp, RECOVERED_ERROR,
2614                                         THRESHOLD_EXCEEDED, 0);
2615                         return check_condition_result;
2616                 } else if (ep->inj_transport) {
2617                         mk_sense_buffer(scp, ABORTED_COMMAND,
2618                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2619                         return check_condition_result;
2620                 } else if (ep->inj_dif) {
2621                         /* Logical block guard check failed */
2622                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2623                         return illegal_condition_result;
2624                 } else if (ep->inj_dix) {
2625                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2626                         return illegal_condition_result;
2627                 }
2628         }
2629         return 0;
2630 }
2631
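/* Hex/ASCII dump of one logical block, 16 bytes per line; used when a
 * protection check fails during a write. */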
2632 static void dump_sector(unsigned char *buf, int len)
2633 {
2634         int i, j, n;
2635
2636         pr_err(">>> Sector Dump <<<\n");
2637         for (i = 0 ; i < len ; i += 16) {
2638                 char b[128];
2639
2640                 for (j = 0, n = 0; j < 16; j++) {
2641                         unsigned char c = buf[i+j];
2642
2643                         if (c >= 0x20 && c < 0x7e)
2644                                 n += scnprintf(b + n, sizeof(b) - n,
2645                                                " %c ", buf[i+j]);
2646                         else
2647                                 n += scnprintf(b + n, sizeof(b) - n,
2648                                                "%02x ", buf[i+j]);
2649                 }
2650                 pr_err("%04d: %s\n", i, b);
2651         }
2652 }
2653
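/*
 * Verify the protection information supplied with a protected WRITE. The
 * protection and data scatterlists are walked in lock-step, one sd_dif_tuple
 * per logical block; on a mismatch the offending block is dumped and a
 * non-zero value is returned (used as the ASCQ by the caller). On success
 * the incoming tuples are saved into dif_storep.
 */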
2654 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2655                              unsigned int sectors, u32 ei_lba)
2656 {
2657         int ret;
2658         struct sd_dif_tuple *sdt;
2659         void *daddr;
2660         sector_t sector = start_sec;
2661         int ppage_offset;
2662         int dpage_offset;
2663         struct sg_mapping_iter diter;
2664         struct sg_mapping_iter piter;
2665
2666         BUG_ON(scsi_sg_count(SCpnt) == 0);
2667         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2668
2669         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2670                         scsi_prot_sg_count(SCpnt),
2671                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2672         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2673                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2674
2675         /* For each protection page */
2676         while (sg_miter_next(&piter)) {
2677                 dpage_offset = 0;
2678                 if (WARN_ON(!sg_miter_next(&diter))) {
2679                         ret = 0x01;
2680                         goto out;
2681                 }
2682
2683                 for (ppage_offset = 0; ppage_offset < piter.length;
2684                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2685                         /* If we're at the end of the current
2686                          * data page advance to the next one
2687                          */
2688                         if (dpage_offset >= diter.length) {
2689                                 if (WARN_ON(!sg_miter_next(&diter))) {
2690                                         ret = 0x01;
2691                                         goto out;
2692                                 }
2693                                 dpage_offset = 0;
2694                         }
2695
2696                         sdt = piter.addr + ppage_offset;
2697                         daddr = diter.addr + dpage_offset;
2698
2699                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2700                         if (ret) {
2701                                 dump_sector(daddr, scsi_debug_sector_size);
2702                                 goto out;
2703                         }
2704
2705                         sector++;
2706                         ei_lba++;
2707                         dpage_offset += scsi_debug_sector_size;
2708                 }
2709                 diter.consumed = dpage_offset;
2710                 sg_miter_stop(&diter);
2711         }
2712         sg_miter_stop(&piter);
2713
2714         dif_copy_prot(SCpnt, start_sec, sectors, false);
2715         dix_writes++;
2716
2717         return 0;
2718
2719 out:
2720         dif_errors++;
2721         sg_miter_stop(&diter);
2722         sg_miter_stop(&piter);
2723         return ret;
2724 }
2725
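/*
 * Logical block provisioning helpers: the map_storep bitmap tracks which
 * "unmap granularity" sized regions of the store are currently mapped.
 * lba_to_map_index() and map_index_to_lba() convert between an LBA and the
 * corresponding bit index, allowing for an optional alignment offset of the
 * first region. For example (illustrative values only), with
 * scsi_debug_unmap_granularity=8 and scsi_debug_unmap_alignment=2, LBAs 0..1
 * fall in region 0, LBAs 2..9 in region 1, and map_index_to_lba(1) returns 2.
 */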
2726 static unsigned long lba_to_map_index(sector_t lba)
2727 {
2728         if (scsi_debug_unmap_alignment) {
2729                 lba += scsi_debug_unmap_granularity -
2730                         scsi_debug_unmap_alignment;
2731         }
2732         do_div(lba, scsi_debug_unmap_granularity);
2733
2734         return lba;
2735 }
2736
2737 static sector_t map_index_to_lba(unsigned long index)
2738 {
2739         sector_t lba = index * scsi_debug_unmap_granularity;
2740
2741         if (scsi_debug_unmap_alignment) {
2742                 lba -= scsi_debug_unmap_granularity -
2743                         scsi_debug_unmap_alignment;
2744         }
2745
2746         return lba;
2747 }
2748
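/*
 * Return whether the block at 'lba' is mapped and, via *num, how many blocks
 * starting at 'lba' share that state before it changes (or the store ends).
 */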
2749 static unsigned int map_state(sector_t lba, unsigned int *num)
2750 {
2751         sector_t end;
2752         unsigned int mapped;
2753         unsigned long index;
2754         unsigned long next;
2755
2756         index = lba_to_map_index(lba);
2757         mapped = test_bit(index, map_storep);
2758
2759         if (mapped)
2760                 next = find_next_zero_bit(map_storep, map_size, index);
2761         else
2762                 next = find_next_bit(map_storep, map_size, index);
2763
2764         end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2765         *num = end - lba;
2766
2767         return mapped;
2768 }
2769
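/* Mark every provisioning-map region touched by [lba, lba + len) as mapped. */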
2770 static void map_region(sector_t lba, unsigned int len)
2771 {
2772         sector_t end = lba + len;
2773
2774         while (lba < end) {
2775                 unsigned long index = lba_to_map_index(lba);
2776
2777                 if (index < map_size)
2778                         set_bit(index, map_storep);
2779
2780                 lba = map_index_to_lba(index + 1);
2781         }
2782 }
2783
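/*
 * Clear the provisioning map for any region wholly contained in
 * [lba, lba + len). When lbprz is set the backing store is zeroed and any
 * protection tuples are reset to 0xff bytes, so a later read returns zeros.
 */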
2784 static void unmap_region(sector_t lba, unsigned int len)
2785 {
2786         sector_t end = lba + len;
2787
2788         while (lba < end) {
2789                 unsigned long index = lba_to_map_index(lba);
2790
2791                 if (lba == map_index_to_lba(index) &&
2792                     lba + scsi_debug_unmap_granularity <= end &&
2793                     index < map_size) {
2794                         clear_bit(index, map_storep);
2795                         if (scsi_debug_lbprz) {
2796                                 memset(fake_storep +
2797                                        lba * scsi_debug_sector_size, 0,
2798                                        scsi_debug_sector_size *
2799                                        scsi_debug_unmap_granularity);
2800                         }
2801                         if (dif_storep) {
2802                                 memset(dif_storep + lba, 0xff,
2803                                        sizeof(*dif_storep) *
2804                                        scsi_debug_unmap_granularity);
2805                         }
2806                 }
2807                 lba = map_index_to_lba(index + 1);
2808         }
2809 }
2810
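/*
 * Respond to the WRITE family of commands (WRITE(6/10/12/16/32) and
 * XDWRITEREAD(10)). Mirrors resp_read_dt0(): decode the CDB, apply the
 * protection and range checks, verify any supplied protection information,
 * copy the data-out buffer into the fake store and update the provisioning
 * map when logical block provisioning is enabled.
 */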
2811 static int
2812 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2813 {
2814         u8 *cmd = scp->cmnd;
2815         u64 lba;
2816         u32 num;
2817         u32 ei_lba;
2818         unsigned long iflags;
2819         int ret;
2820         bool check_prot;
2821
2822         switch (cmd[0]) {
2823         case WRITE_16:
2824                 ei_lba = 0;
2825                 lba = get_unaligned_be64(cmd + 2);
2826                 num = get_unaligned_be32(cmd + 10);
2827                 check_prot = true;
2828                 break;
2829         case WRITE_10:
2830                 ei_lba = 0;
2831                 lba = get_unaligned_be32(cmd + 2);
2832                 num = get_unaligned_be16(cmd + 7);
2833                 check_prot = true;
2834                 break;
2835         case WRITE_6:
2836                 ei_lba = 0;
2837                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2838                       (u32)(cmd[1] & 0x1f) << 16;
2839                 num = (0 == cmd[4]) ? 256 : cmd[4];
2840                 check_prot = true;
2841                 break;
2842         case WRITE_12:
2843                 ei_lba = 0;
2844                 lba = get_unaligned_be32(cmd + 2);
2845                 num = get_unaligned_be32(cmd + 6);
2846                 check_prot = true;
2847                 break;
2848         case 0x53:      /* XDWRITEREAD(10) */
2849                 ei_lba = 0;
2850                 lba = get_unaligned_be32(cmd + 2);
2851                 num = get_unaligned_be16(cmd + 7);
2852                 check_prot = false;
2853                 break;
2854         default:        /* assume WRITE(32) */
2855                 lba = get_unaligned_be64(cmd + 12);
2856                 ei_lba = get_unaligned_be32(cmd + 20);
2857                 num = get_unaligned_be32(cmd + 28);
2858                 check_prot = false;
2859                 break;
2860         }
2861         if (check_prot) {
2862                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2863                     (cmd[1] & 0xe0)) {
2864                         mk_sense_invalid_opcode(scp);
2865                         return check_condition_result;
2866                 }
2867                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2868                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2869                     (cmd[1] & 0xe0) == 0)
2870                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2871                                     "to DIF device\n");
2872         }
2873
2874         /* inline check_device_access_params() */
2875         if (lba + num > sdebug_capacity) {
2876                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2877                 return check_condition_result;
2878         }
2879         /* transfer length excessive (tie in to block limits VPD page) */
2880         if (num > sdebug_store_sectors) {
2881                 /* needs work to find which cdb byte 'num' comes from */
2882                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2883                 return check_condition_result;
2884         }
2885
2886         write_lock_irqsave(&atomic_rw, iflags);
2887
2888         /* DIX + T10 DIF */
2889         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2890                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2891
2892                 if (prot_ret) {
2893                         write_unlock_irqrestore(&atomic_rw, iflags);
2894                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2895                         return illegal_condition_result;
2896                 }
2897         }
2898
2899         ret = do_device_access(scp, lba, num, true);
2900         if (scsi_debug_lbp())
2901                 map_region(lba, num);
2902         write_unlock_irqrestore(&atomic_rw, iflags);
2903         if (-1 == ret)
2904                 return (DID_ERROR << 16);
2905         else if ((ret < (num * scsi_debug_sector_size)) &&
2906                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2907                 sdev_printk(KERN_INFO, scp->device,
2908                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2909                             my_name, num * scsi_debug_sector_size, ret);
2910
2911         if (sdebug_any_injecting_opt) {
2912                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2913
2914                 if (ep->inj_recovered) {
2915                         mk_sense_buffer(scp, RECOVERED_ERROR,
2916                                         THRESHOLD_EXCEEDED, 0);
2917                         return check_condition_result;
2918                 } else if (ep->inj_dif) {
2919                         /* Logical block guard check failed */
2920                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2921                         return illegal_condition_result;
2922                 } else if (ep->inj_dix) {
2923                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2924                         return illegal_condition_result;
2925                 }
2926         }
2927         return 0;
2928 }
2929
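/*
 * Common handler for WRITE SAME(10/16). With the UNMAP bit set and logical
 * block provisioning enabled the range is simply unmapped. Otherwise one
 * logical block is obtained (zeros if NDOB is set, else fetched from the
 * data-out buffer) and replicated across the remaining blocks of the range.
 */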
2930 static int
2931 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2932                 bool unmap, bool ndob)
2933 {
2934         unsigned long iflags;
2935         unsigned long long i;
2936         int ret;
2937
2938         ret = check_device_access_params(scp, lba, num);
2939         if (ret)
2940                 return ret;
2941
2942         write_lock_irqsave(&atomic_rw, iflags);
2943
2944         if (unmap && scsi_debug_lbp()) {
2945                 unmap_region(lba, num);
2946                 goto out;
2947         }
2948
2949         /* if ndob then zero 1 logical block, else fetch 1 logical block */
2950         if (ndob) {
2951                 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
2952                        scsi_debug_sector_size);
2953                 ret = 0;
2954         } else
2955                 ret = fetch_to_dev_buffer(scp, fake_storep +
2956                                                (lba * scsi_debug_sector_size),
2957                                           scsi_debug_sector_size);
2958
2959         if (-1 == ret) {
2960                 write_unlock_irqrestore(&atomic_rw, iflags);
2961                 return (DID_ERROR << 16);
2962         } else if ((ret < (num * scsi_debug_sector_size)) &&
2963                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2964                 sdev_printk(KERN_INFO, scp->device,
2965                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
2966                             my_name, "write same",
2967                             num * scsi_debug_sector_size, ret);
2968
2969         /* Copy first sector to remaining blocks */
2970         for (i = 1 ; i < num ; i++)
2971                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2972                        fake_storep + (lba * scsi_debug_sector_size),
2973                        scsi_debug_sector_size);
2974
2975         if (scsi_debug_lbp())
2976                 map_region(lba, num);
2977 out:
2978         write_unlock_irqrestore(&atomic_rw, iflags);
2979
2980         return 0;
2981 }
2982
2983 static int
2984 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2985 {
2986         u8 *cmd = scp->cmnd;
2987         u32 lba;
2988         u16 num;
2989         u32 ei_lba = 0;
2990         bool unmap = false;
2991
2992         if (cmd[1] & 0x8) {
2993                 if (scsi_debug_lbpws10 == 0) {
2994                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
2995                         return check_condition_result;
2996                 } else
2997                         unmap = true;
2998         }
2999         lba = get_unaligned_be32(cmd + 2);
3000         num = get_unaligned_be16(cmd + 7);
3001         if (num > scsi_debug_write_same_length) {
3002                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3003                 return check_condition_result;
3004         }
3005         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3006 }
3007
3008 static int
3009 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3010 {
3011         u8 *cmd = scp->cmnd;
3012         u64 lba;
3013         u32 num;
3014         u32 ei_lba = 0;
3015         bool unmap = false;
3016         bool ndob = false;
3017
3018         if (cmd[1] & 0x8) {     /* UNMAP */
3019                 if (scsi_debug_lbpws == 0) {
3020                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3021                         return check_condition_result;
3022                 } else
3023                         unmap = true;
3024         }
3025         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3026                 ndob = true;
3027         lba = get_unaligned_be64(cmd + 2);
3028         num = get_unaligned_be32(cmd + 10);
3029         if (num > scsi_debug_write_same_length) {
3030                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3031                 return check_condition_result;
3032         }
3033         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3034 }
3035
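/*
 * COMPARE AND WRITE: the data-out buffer carries 'num' blocks to compare
 * followed by 'num' blocks to write. Both halves are fetched into a
 * temporary buffer by briefly redirecting fake_storep (safe since the write
 * lock is held); comp_write_worker() then does the compare and, when it
 * succeeds, the write. A mismatch returns MISCOMPARE sense.
 */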
3036 static int
3037 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3038 {
3039         u8 *cmd = scp->cmnd;
3040         u8 *arr;
3041         u8 *fake_storep_hold;
3042         u64 lba;
3043         u32 dnum;
3044         u32 lb_size = scsi_debug_sector_size;
3045         u8 num;
3046         unsigned long iflags;
3047         int ret;
3048         int retval = 0;
3049
3050         lba = get_unaligned_be64(cmd + 2);
3051         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3052         if (0 == num)
3053                 return 0;       /* degenerate case, not an error */
3054         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3055             (cmd[1] & 0xe0)) {
3056                 mk_sense_invalid_opcode(scp);
3057                 return check_condition_result;
3058         }
3059         if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3060              scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3061             (cmd[1] & 0xe0) == 0)
3062                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3063                             "to DIF device\n");
3064
3065         /* inline check_device_access_params() */
3066         if (lba + num > sdebug_capacity) {
3067                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3068                 return check_condition_result;
3069         }
3070         /* transfer length excessive (tie in to block limits VPD page) */
3071         if (num > sdebug_store_sectors) {
3072                 /* needs work to find which cdb byte 'num' comes from */
3073                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3074                 return check_condition_result;
3075         }
3076         dnum = 2 * num;
3077         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3078         if (NULL == arr) {
3079                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3080                                 INSUFF_RES_ASCQ);
3081                 return check_condition_result;
3082         }
3083
3084         write_lock_irqsave(&atomic_rw, iflags);
3085
3086         /* trick do_device_access() to fetch both the compare and write buffers
3087          * from the data-out data into arr. Safe (atomic) since write_lock held. */
3088         fake_storep_hold = fake_storep;
3089         fake_storep = arr;
3090         ret = do_device_access(scp, 0, dnum, true);
3091         fake_storep = fake_storep_hold;
3092         if (ret == -1) {
3093                 retval = DID_ERROR << 16;
3094                 goto cleanup;
3095         } else if ((ret < (dnum * lb_size)) &&
3096                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098                             "indicated=%u, IO sent=%d bytes\n", my_name,
3099                             dnum * lb_size, ret);
3100         if (!comp_write_worker(lba, num, arr)) {
3101                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3102                 retval = check_condition_result;
3103                 goto cleanup;
3104         }
3105         if (scsi_debug_lbp())
3106                 map_region(lba, num);
3107 cleanup:
3108         write_unlock_irqrestore(&atomic_rw, iflags);
3109         kfree(arr);
3110         return retval;
3111 }
3112
3113 struct unmap_block_desc {
3114         __be64  lba;
3115         __be32  blocks;
3116         __be32  __reserved;
3117 };
3118
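/*
 * UNMAP: the parameter list contains a header followed by 16-byte block
 * descriptors (struct unmap_block_desc above); each described range is
 * unmapped under the write lock.
 */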
3119 static int
3120 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3121 {
3122         unsigned char *buf;
3123         struct unmap_block_desc *desc;
3124         unsigned int i, payload_len, descriptors;
3125         int ret;
3126         unsigned long iflags;
3127
3128
3129         if (!scsi_debug_lbp())
3130                 return 0;       /* fib and say it's done */
3131         payload_len = get_unaligned_be16(scp->cmnd + 7);
3132         BUG_ON(scsi_bufflen(scp) != payload_len);
3133
3134         descriptors = (payload_len - 8) / 16;
3135         if (descriptors > scsi_debug_unmap_max_desc) {
3136                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3137                 return check_condition_result;
3138         }
3139
3140         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3141         if (!buf) {
3142                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3143                                 INSUFF_RES_ASCQ);
3144                 return check_condition_result;
3145         }
3146
3147         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3148
3149         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3150         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3151
3152         desc = (void *)&buf[8];
3153
3154         write_lock_irqsave(&atomic_rw, iflags);
3155
3156         for (i = 0 ; i < descriptors ; i++) {
3157                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3158                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3159
3160                 ret = check_device_access_params(scp, lba, num);
3161                 if (ret)
3162                         goto out;
3163
3164                 unmap_region(lba, num);
3165         }
3166
3167         ret = 0;
3168
3169 out:
3170         write_unlock_irqrestore(&atomic_rw, iflags);
3171         kfree(buf);
3172
3173         return ret;
3174 }
3175
3176 #define SDEBUG_GET_LBA_STATUS_LEN 32
3177
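/*
 * GET LBA STATUS (SERVICE ACTION IN(16)): report whether the block at the
 * given LBA is mapped or deallocated, and the number of following blocks
 * sharing that state, using a single LBA status descriptor.
 */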
3178 static int
3179 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3180 {
3181         u8 *cmd = scp->cmnd;
3182         u64 lba;
3183         u32 alloc_len, mapped, num;
3184         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3185         int ret;
3186
3187         lba = get_unaligned_be64(cmd + 2);
3188         alloc_len = get_unaligned_be32(cmd + 10);
3189
3190         if (alloc_len < 24)
3191                 return 0;
3192
3193         ret = check_device_access_params(scp, lba, 1);
3194         if (ret)
3195                 return ret;
3196
3197         if (scsi_debug_lbp())
3198                 mapped = map_state(lba, &num);
3199         else {
3200                 mapped = 1;
3201                 /* following just in case virtual_gb changed */
3202                 sdebug_capacity = get_sdebug_capacity();
3203                 if (sdebug_capacity - lba <= 0xffffffff)
3204                         num = sdebug_capacity - lba;
3205                 else
3206                         num = 0xffffffff;
3207         }
3208
3209         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3210         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3211         put_unaligned_be64(lba, arr + 8);       /* LBA */
3212         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3213         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3214
3215         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3216 }
3217
3218 #define SDEBUG_RLUN_ARR_SZ 256
3219
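/*
 * REPORT LUNS: build a LUN list covering scsi_debug_max_luns (optionally
 * skipping LUN 0 and appending the REPORT LUNS well known LUN), truncated to
 * what fits in the 256 byte response array.
 */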
3220 static int resp_report_luns(struct scsi_cmnd * scp,
3221                             struct sdebug_dev_info * devip)
3222 {
3223         unsigned int alloc_len;
3224         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3225         u64 lun;
3226         unsigned char *cmd = scp->cmnd;
3227         int select_report = (int)cmd[2];
3228         struct scsi_lun *one_lun;
3229         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3230         unsigned char * max_addr;
3231
3232         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3233         shortish = (alloc_len < 4);
3234         if (shortish || (select_report > 2)) {
3235                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3236                 return check_condition_result;
3237         }
3238         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3239         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3240         lun_cnt = scsi_debug_max_luns;
3241         if (1 == select_report)
3242                 lun_cnt = 0;
3243         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3244                 --lun_cnt;
3245         want_wlun = (select_report > 0) ? 1 : 0;
3246         num = lun_cnt + want_wlun;
3247         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3248         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3249         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3250                             sizeof(struct scsi_lun)), num);
3251         if (n < num) {
3252                 want_wlun = 0;
3253                 lun_cnt = n;
3254         }
3255         one_lun = (struct scsi_lun *) &arr[8];
3256         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3257         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3258              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3259              i++, lun++) {
3260                 upper = (lun >> 8) & 0x3f;
3261                 if (upper)
3262                         one_lun[i].scsi_lun[0] =
3263                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3264                 one_lun[i].scsi_lun[1] = lun & 0xff;
3265         }
3266         if (want_wlun) {
3267                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3268                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3269                 i++;
3270         }
3271         alloc_len = (unsigned char *)(one_lun + i) - arr;
3272         return fill_from_dev_buffer(scp, arr,
3273                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3274 }
3275
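/*
 * XOR step of XDWRITEREAD(10): the read data already placed in the
 * bidirectional command's data-in scatterlist is XORed in place with the
 * transferred write data.
 */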
3276 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3277                             unsigned int num, struct sdebug_dev_info *devip)
3278 {
3279         int j;
3280         unsigned char *kaddr, *buf;
3281         unsigned int offset;
3282         struct scsi_data_buffer *sdb = scsi_in(scp);
3283         struct sg_mapping_iter miter;
3284
3285         /* it would be better not to use a temporary buffer here */
3286         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3287         if (!buf) {
3288                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3289                                 INSUFF_RES_ASCQ);
3290                 return check_condition_result;
3291         }
3292
3293         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3294
3295         offset = 0;
3296         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3297                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3298
3299         while (sg_miter_next(&miter)) {
3300                 kaddr = miter.addr;
3301                 for (j = 0; j < miter.length; j++)
3302                         *(kaddr + j) ^= *(buf + offset + j);
3303
3304                 offset += miter.length;
3305         }
3306         sg_miter_stop(&miter);
3307         kfree(buf);
3308
3309         return 0;
3310 }
3311
3312 static int
3313 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3314 {
3315         u8 *cmd = scp->cmnd;
3316         u64 lba;
3317         u32 num;
3318         int errsts;
3319
3320         if (!scsi_bidi_cmnd(scp)) {
3321                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3322                                 INSUFF_RES_ASCQ);
3323                 return check_condition_result;
3324         }
3325         errsts = resp_read_dt0(scp, devip);
3326         if (errsts)
3327                 return errsts;
3328         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3329                 errsts = resp_write_dt0(scp, devip);
3330                 if (errsts)
3331                         return errsts;
3332         }
3333         lba = get_unaligned_be32(cmd + 2);
3334         num = get_unaligned_be16(cmd + 7);
3335         return resp_xdwriteread(scp, lba, num, devip);
3336 }
3337
3338 /* When timer or tasklet goes off this function is called. */
3339 static void sdebug_q_cmd_complete(unsigned long indx)
3340 {
3341         int qa_indx;
3342         int retiring = 0;
3343         unsigned long iflags;
3344         struct sdebug_queued_cmd *sqcp;
3345         struct scsi_cmnd *scp;
3346         struct sdebug_dev_info *devip;
3347
3348         atomic_inc(&sdebug_completions);
3349         qa_indx = indx;
3350         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3351                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3352                 return;
3353         }
3354         spin_lock_irqsave(&queued_arr_lock, iflags);
3355         sqcp = &queued_arr[qa_indx];
3356         scp = sqcp->a_cmnd;
3357         if (NULL == scp) {
3358                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3359                 pr_err("%s: scp is NULL\n", __func__);
3360                 return;
3361         }
3362         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3363         if (devip)
3364                 atomic_dec(&devip->num_in_q);
3365         else
3366                 pr_err("%s: devip=NULL\n", __func__);
3367         if (atomic_read(&retired_max_queue) > 0)
3368                 retiring = 1;
3369
3370         sqcp->a_cmnd = NULL;
3371         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3372                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3373                 pr_err("%s: Unexpected completion\n", __func__);
3374                 return;
3375         }
3376
3377         if (unlikely(retiring)) {       /* user has reduced max_queue */
3378                 int k, retval;
3379
3380                 retval = atomic_read(&retired_max_queue);
3381                 if (qa_indx >= retval) {
3382                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3383                         pr_err("%s: index %d too large\n", __func__, retval);
3384                         return;
3385                 }
3386                 k = find_last_bit(queued_in_use_bm, retval);
3387                 if ((k < scsi_debug_max_queue) || (k == retval))
3388                         atomic_set(&retired_max_queue, 0);
3389                 else
3390                         atomic_set(&retired_max_queue, k + 1);
3391         }
3392         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3393         scp->scsi_done(scp); /* callback to mid level */
3394 }
3395
3396 /* When high resolution timer goes off this function is called. */
3397 static enum hrtimer_restart
3398 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3399 {
3400         int qa_indx;
3401         int retiring = 0;
3402         unsigned long iflags;
3403         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3404         struct sdebug_queued_cmd *sqcp;
3405         struct scsi_cmnd *scp;
3406         struct sdebug_dev_info *devip;
3407
3408         atomic_inc(&sdebug_completions);
3409         qa_indx = sd_hrtp->qa_indx;
3410         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3411                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3412                 goto the_end;
3413         }
3414         spin_lock_irqsave(&queued_arr_lock, iflags);
3415         sqcp = &queued_arr[qa_indx];
3416         scp = sqcp->a_cmnd;
3417         if (NULL == scp) {
3418                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3419                 pr_err("%s: scp is NULL\n", __func__);
3420                 goto the_end;
3421         }
3422         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3423         if (devip)
3424                 atomic_dec(&devip->num_in_q);
3425         else
3426                 pr_err("%s: devip=NULL\n", __func__);
3427         if (atomic_read(&retired_max_queue) > 0)
3428                 retiring = 1;
3429
3430         sqcp->a_cmnd = NULL;
3431         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3432                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3433                 pr_err("%s: Unexpected completion\n", __func__);
3434                 goto the_end;
3435         }
3436
3437         if (unlikely(retiring)) {       /* user has reduced max_queue */
3438                 int k, retval;
3439
3440                 retval = atomic_read(&retired_max_queue);
3441                 if (qa_indx >= retval) {
3442                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3443                         pr_err("%s: index %d too large\n", __func__, retval);
3444                         goto the_end;
3445                 }
3446                 k = find_last_bit(queued_in_use_bm, retval);
3447                 if ((k < scsi_debug_max_queue) || (k == retval))
3448                         atomic_set(&retired_max_queue, 0);
3449                 else
3450                         atomic_set(&retired_max_queue, k + 1);
3451         }
3452         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3453         scp->scsi_done(scp); /* callback to mid level */
3454 the_end:
3455         return HRTIMER_NORESTART;
3456 }
3457
3458 static struct sdebug_dev_info *
3459 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3460 {
3461         struct sdebug_dev_info *devip;
3462
3463         devip = kzalloc(sizeof(*devip), flags);
3464         if (devip) {
3465                 devip->sdbg_host = sdbg_host;
3466                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3467         }
3468         return devip;
3469 }
3470
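/*
 * Find (or lazily create) the sdebug_dev_info matching the given
 * scsi_device; reuses a free slot from the host's dev_info_list when
 * possible and raises a power-on/reset unit attention for new devices.
 */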
3471 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
3472 {
3473         struct sdebug_host_info * sdbg_host;
3474         struct sdebug_dev_info * open_devip = NULL;
3475         struct sdebug_dev_info * devip =
3476                         (struct sdebug_dev_info *)sdev->hostdata;
3477
3478         if (devip)
3479                 return devip;
3480         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3481         if (!sdbg_host) {
3482                 pr_err("%s: Host info NULL\n", __func__);
3483                 return NULL;
3484         }
3485         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3486                 if ((devip->used) && (devip->channel == sdev->channel) &&
3487                     (devip->target == sdev->id) &&
3488                     (devip->lun == sdev->lun))
3489                         return devip;
3490                 else {
3491                         if ((!devip->used) && (!open_devip))
3492                                 open_devip = devip;
3493                 }
3494         }
3495         if (!open_devip) { /* try and make a new one */
3496                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3497                 if (!open_devip) {
3498                         printk(KERN_ERR "%s: out of memory at line %d\n",
3499                                 __func__, __LINE__);
3500                         return NULL;
3501                 }
3502         }
3503
3504         open_devip->channel = sdev->channel;
3505         open_devip->target = sdev->id;
3506         open_devip->lun = sdev->lun;
3507         open_devip->sdbg_host = sdbg_host;
3508         atomic_set(&open_devip->num_in_q, 0);
3509         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3510         open_devip->used = true;
3511         return open_devip;
3512 }
3513
3514 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3515 {
3516         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3517                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3518                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3519         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3520         return 0;
3521 }
3522
3523 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3524 {
3525         struct sdebug_dev_info *devip;
3526
3527         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3528                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3529                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3530         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3531                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3532         devip = devInfoReg(sdp);
3533         if (NULL == devip)
3534                 return 1;       /* no resources, will be marked offline */
3535         sdp->hostdata = devip;
3536         blk_queue_max_segment_size(sdp->request_queue, -1U);
3537         if (scsi_debug_no_uld)
3538                 sdp->no_uld_attach = 1;
3539         return 0;
3540 }
3541
3542 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3543 {
3544         struct sdebug_dev_info *devip =
3545                 (struct sdebug_dev_info *)sdp->hostdata;
3546
3547         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3548                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3549                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3550         if (devip) {
3551                 /* make this slot available for re-use */
3552                 devip->used = false;
3553                 sdp->hostdata = NULL;
3554         }
3555 }
3556
3557 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3558 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3559 {
3560         unsigned long iflags;
3561         int k, qmax, r_qmax;
3562         struct sdebug_queued_cmd *sqcp;
3563         struct sdebug_dev_info *devip;
3564
3565         spin_lock_irqsave(&queued_arr_lock, iflags);
3566         qmax = scsi_debug_max_queue;
3567         r_qmax = atomic_read(&retired_max_queue);
3568         if (r_qmax > qmax)
3569                 qmax = r_qmax;
3570         for (k = 0; k < qmax; ++k) {
3571                 if (test_bit(k, queued_in_use_bm)) {
3572                         sqcp = &queued_arr[k];
3573                         if (cmnd == sqcp->a_cmnd) {
3574                                 devip = (struct sdebug_dev_info *)
3575                                         cmnd->device->hostdata;
3576                                 if (devip)
3577                                         atomic_dec(&devip->num_in_q);
3578                                 sqcp->a_cmnd = NULL;
3579                                 spin_unlock_irqrestore(&queued_arr_lock,
3580                                                        iflags);
3581                                 if (scsi_debug_ndelay > 0) {
3582                                         if (sqcp->sd_hrtp)
3583                                                 hrtimer_cancel(
3584                                                         &sqcp->sd_hrtp->hrt);
3585                                 } else if (scsi_debug_delay > 0) {
3586                                         if (sqcp->cmnd_timerp)
3587                                                 del_timer_sync(
3588                                                         sqcp->cmnd_timerp);
3589                                 } else if (scsi_debug_delay < 0) {
3590                                         if (sqcp->tletp)
3591                                                 tasklet_kill(sqcp->tletp);
3592                                 }
3593                                 clear_bit(k, queued_in_use_bm);
3594                                 return 1;
3595                         }
3596                 }
3597         }
3598         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3599         return 0;
3600 }
3601
3602 /* Deletes (stops) timers or tasklets of all queued commands */
3603 static void stop_all_queued(void)
3604 {
3605         unsigned long iflags;
3606         int k;
3607         struct sdebug_queued_cmd *sqcp;
3608         struct sdebug_dev_info *devip;
3609
3610         spin_lock_irqsave(&queued_arr_lock, iflags);
3611         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3612                 if (test_bit(k, queued_in_use_bm)) {
3613                         sqcp = &queued_arr[k];
3614                         if (sqcp->a_cmnd) {
3615                                 devip = (struct sdebug_dev_info *)
3616                                         sqcp->a_cmnd->device->hostdata;
3617                                 if (devip)
3618                                         atomic_dec(&devip->num_in_q);
3619                                 sqcp->a_cmnd = NULL;
3620                                 spin_unlock_irqrestore(&queued_arr_lock,
3621                                                        iflags);
3622                                 if (scsi_debug_ndelay > 0) {
3623                                         if (sqcp->sd_hrtp)
3624                                                 hrtimer_cancel(
3625                                                         &sqcp->sd_hrtp->hrt);
3626                                 } else if (scsi_debug_delay > 0) {
3627                                         if (sqcp->cmnd_timerp)
3628                                                 del_timer_sync(
3629                                                         sqcp->cmnd_timerp);
3630                                 } else if (scsi_debug_delay < 0) {
3631                                         if (sqcp->tletp)
3632                                                 tasklet_kill(sqcp->tletp);
3633                                 }
3634                                 clear_bit(k, queued_in_use_bm);
3635                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3636                         }
3637                 }
3638         }
3639         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3640 }
3641
3642 /* Free queued command memory on heap */
3643 static void free_all_queued(void)
3644 {
3645         unsigned long iflags;
3646         int k;
3647         struct sdebug_queued_cmd *sqcp;
3648
3649         spin_lock_irqsave(&queued_arr_lock, iflags);
3650         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3651                 sqcp = &queued_arr[k];
3652                 kfree(sqcp->cmnd_timerp);
3653                 sqcp->cmnd_timerp = NULL;
3654                 kfree(sqcp->tletp);
3655                 sqcp->tletp = NULL;
3656                 kfree(sqcp->sd_hrtp);
3657                 sqcp->sd_hrtp = NULL;
3658         }
3659         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3660 }
3661
3662 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3663 {
3664         ++num_aborts;
3665         if (SCpnt) {
3666                 if (SCpnt->device &&
3667                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3668                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3669                                     __func__);
3670                 stop_queued_cmnd(SCpnt);
3671         }
3672         return SUCCESS;
3673 }
3674
3675 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
3676 {
3677         struct sdebug_dev_info * devip;
3678
3679         ++num_dev_resets;
3680         if (SCpnt && SCpnt->device) {
3681                 struct scsi_device *sdp = SCpnt->device;
3682
3683                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3684                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3685                 devip = devInfoReg(sdp);
3686                 if (devip)
3687                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3688         }
3689         return SUCCESS;
3690 }
3691
3692 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3693 {
3694         struct sdebug_host_info *sdbg_host;
3695         struct sdebug_dev_info *devip;
3696         struct scsi_device *sdp;
3697         struct Scsi_Host *hp;
3698         int k = 0;
3699
3700         ++num_target_resets;
3701         if (!SCpnt)
3702                 goto lie;
3703         sdp = SCpnt->device;
3704         if (!sdp)
3705                 goto lie;
3706         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3707                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3708         hp = sdp->host;
3709         if (!hp)
3710                 goto lie;
3711         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3712         if (sdbg_host) {
3713                 list_for_each_entry(devip,
3714                                     &sdbg_host->dev_info_list,
3715                                     dev_list)
3716                         if (devip->target == sdp->id) {
3717                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3718                                 ++k;
3719                         }
3720         }
3721         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3722                 sdev_printk(KERN_INFO, sdp,
3723                             "%s: %d device(s) found in target\n", __func__, k);
3724 lie:
3725         return SUCCESS;
3726 }
3727
3728 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
3729 {
3730         struct sdebug_host_info *sdbg_host;
3731         struct sdebug_dev_info *devip;
3732         struct scsi_device * sdp;
3733         struct Scsi_Host * hp;
3734         int k = 0;
3735
3736         ++num_bus_resets;
3737         if (!(SCpnt && SCpnt->device))
3738                 goto lie;
3739         sdp = SCpnt->device;
3740         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3741                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3742         hp = sdp->host;
3743         if (hp) {
3744                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3745                 if (sdbg_host) {
3746                         list_for_each_entry(devip,
3747                                             &sdbg_host->dev_info_list,
3748                                             dev_list) {
3749                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3750                                 ++k;
3751                         }
3752                 }
3753         }
3754         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3755                 sdev_printk(KERN_INFO, sdp,
3756                             "%s: %d device(s) found in host\n", __func__, k);
3757 lie:
3758         return SUCCESS;
3759 }
3760
3761 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
3762 {
3763         struct sdebug_host_info * sdbg_host;
3764         struct sdebug_dev_info *devip;
3765         int k = 0;
3766
3767         ++num_host_resets;
3768         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3769                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3770         spin_lock(&sdebug_host_list_lock);
3771         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3772                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3773                                     dev_list) {
3774                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3775                         ++k;
3776                 }
3777         }
3778         spin_unlock(&sdebug_host_list_lock);
3779         stop_all_queued();
3780         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3781                 sdev_printk(KERN_INFO, SCpnt->device,
3782                             "%s: %d device(s) found\n", __func__, k);
3783         return SUCCESS;
3784 }
3785
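/*
 * Write an MSDOS partition table into the first block of the ram store so
 * the simulated disk appears with scsi_debug_num_parts partitions, each
 * aligned to a cylinder boundary derived from the fake geometry.
 */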
3786 static void __init sdebug_build_parts(unsigned char *ramp,
3787                                       unsigned long store_size)
3788 {
3789         struct partition * pp;
3790         int starts[SDEBUG_MAX_PARTS + 2];
3791         int sectors_per_part, num_sectors, k;
3792         int heads_by_sects, start_sec, end_sec;
3793
3794         /* assume partition table already zeroed */
3795         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3796                 return;
3797         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3798                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3799                 pr_warn("%s: reducing partitions to %d\n", __func__,
3800                         SDEBUG_MAX_PARTS);
3801         }
3802         num_sectors = (int)sdebug_store_sectors;
3803         sectors_per_part = (num_sectors - sdebug_sectors_per)
3804                            / scsi_debug_num_parts;
3805         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3806         starts[0] = sdebug_sectors_per;
3807         for (k = 1; k < scsi_debug_num_parts; ++k)
3808                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3809                             * heads_by_sects;
3810         starts[scsi_debug_num_parts] = num_sectors;
3811         starts[scsi_debug_num_parts + 1] = 0;
3812
3813         ramp[510] = 0x55;       /* magic partition markings */
3814         ramp[511] = 0xAA;
3815         pp = (struct partition *)(ramp + 0x1be);
3816         for (k = 0; starts[k + 1]; ++k, ++pp) {
3817                 start_sec = starts[k];
3818                 end_sec = starts[k + 1] - 1;
3819                 pp->boot_ind = 0;
3820
3821                 pp->cyl = start_sec / heads_by_sects;
3822                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3823                            / sdebug_sectors_per;
3824                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3825
3826                 pp->end_cyl = end_sec / heads_by_sects;
3827                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3828                                / sdebug_sectors_per;
3829                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3830
3831                 pp->start_sect = cpu_to_le32(start_sec);
3832                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3833                 pp->sys_ind = 0x83;     /* plain Linux partition */
3834         }
3835 }
3836
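/*
 * Queue a response for later delivery. delta_jiff == 0 means complete in the
 * caller's thread; delta_jiff > 0 arms a timer for that many jiffies;
 * otherwise a high resolution timer is used when scsi_debug_ndelay > 0, and
 * a tasklet for a negative delay. TASK SET FULL is returned (or injected
 * every nth command) when the queue limits are exceeded.
 */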
3837 static int
3838 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3839               int scsi_result, int delta_jiff)
3840 {
3841         unsigned long iflags;
3842         int k, num_in_q, qdepth, inject;
3843         struct sdebug_queued_cmd *sqcp = NULL;
3844         struct scsi_device *sdp = cmnd->device;
3845
3846         if (NULL == cmnd || NULL == devip) {
3847                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3848                         __func__);
3849                 /* no particularly good error to report back */
3850                 return SCSI_MLQUEUE_HOST_BUSY;
3851         }
3852         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3853                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3854                             __func__, scsi_result);
3855         if (delta_jiff == 0)
3856                 goto respond_in_thread;
3857
3858         /* schedule the response at a later time if resources permit */
3859         spin_lock_irqsave(&queued_arr_lock, iflags);
3860         num_in_q = atomic_read(&devip->num_in_q);
3861         qdepth = cmnd->device->queue_depth;
3862         inject = 0;
3863         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3864                 if (scsi_result) {
3865                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3866                         goto respond_in_thread;
3867                 } else
3868                         scsi_result = device_qfull_result;
3869         } else if ((scsi_debug_every_nth != 0) &&
3870                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3871                    (scsi_result == 0)) {
3872                 if ((num_in_q == (qdepth - 1)) &&
3873                     (atomic_inc_return(&sdebug_a_tsf) >=
3874                      abs(scsi_debug_every_nth))) {
3875                         atomic_set(&sdebug_a_tsf, 0);
3876                         inject = 1;
3877                         scsi_result = device_qfull_result;
3878                 }
3879         }
3880
3881         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3882         if (k >= scsi_debug_max_queue) {
3883                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3884                 if (scsi_result)
3885                         goto respond_in_thread;
3886                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3887                         scsi_result = device_qfull_result;
3888                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3889                         sdev_printk(KERN_INFO, sdp,
3890                                     "%s: max_queue=%d exceeded, %s\n",
3891                                     __func__, scsi_debug_max_queue,
3892                                     (scsi_result ?  "status: TASK SET FULL" :
3893                                                     "report: host busy"));
3894                 if (scsi_result)
3895                         goto respond_in_thread;
3896                 else
3897                         return SCSI_MLQUEUE_HOST_BUSY;
3898         }
3899         __set_bit(k, queued_in_use_bm);
3900         atomic_inc(&devip->num_in_q);
3901         sqcp = &queued_arr[k];
3902         sqcp->a_cmnd = cmnd;
3903         cmnd->result = scsi_result;
3904         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3905         if (delta_jiff > 0) {
3906                 if (NULL == sqcp->cmnd_timerp) {
3907                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
3908                                                     GFP_ATOMIC);
3909                         if (NULL == sqcp->cmnd_timerp)
3910                                 return SCSI_MLQUEUE_HOST_BUSY;
3911                         init_timer(sqcp->cmnd_timerp);
3912                 }
3913                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
3914                 sqcp->cmnd_timerp->data = k;
3915                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
3916                 add_timer(sqcp->cmnd_timerp);
3917         } else if (scsi_debug_ndelay > 0) {
3918                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
3919                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
3920
3921                 if (NULL == sd_hp) {
3922                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
3923                         if (NULL == sd_hp)
3924                                 return SCSI_MLQUEUE_HOST_BUSY;
3925                         sqcp->sd_hrtp = sd_hp;
3926                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
3927                                      HRTIMER_MODE_REL);
3928                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
3929                         sd_hp->qa_indx = k;
3930                 }
3931                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
3932         } else {        /* delay < 0 */
3933                 if (NULL == sqcp->tletp) {
3934                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
3935                                               GFP_ATOMIC);
3936                         if (NULL == sqcp->tletp)
3937                                 return SCSI_MLQUEUE_HOST_BUSY;
3938                         tasklet_init(sqcp->tletp,
3939                                      sdebug_q_cmd_complete, k);
3940                 }
3941                 if (-1 == delta_jiff)
3942                         tasklet_hi_schedule(sqcp->tletp);
3943                 else
3944                         tasklet_schedule(sqcp->tletp);
3945         }
3946         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3947             (scsi_result == device_qfull_result))
3948                 sdev_printk(KERN_INFO, sdp,
3949                             "%s: num_in_q=%d +1, %s%s\n", __func__,
3950                             num_in_q, (inject ? "<inject> " : ""),
3951                             "status: TASK SET FULL");
3952         return 0;
3953
3954 respond_in_thread:      /* call back to mid-layer using invocation thread */
3955         cmnd->result = scsi_result;
3956         cmnd->scsi_done(cmnd);
3957         return 0;
3958 }
3959
3960 /* Note: The following macros create attribute files in the
3961    /sys/module/scsi_debug/parameters directory. Unfortunately this
3962    driver cannot detect a change made through one of those files and so
3963    cannot trigger auxiliary actions, as it can when the corresponding
3964    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
3965  */
3966 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
3967 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
3968 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
3969 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
3970 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
3971 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
3972 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
3973 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
3974 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
3975 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
3976 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
3977 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
3978 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
3979 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
3980 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
3981 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
3982 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
3983 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
3984 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
3985 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
3986 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
3987 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
3988 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
3989 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
3990 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
3991 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
3992 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
3993 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
3994 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
3995 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
3996 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
3997 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
3998 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
3999 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
4000 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4001 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4002 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4003 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4004                    S_IRUGO | S_IWUSR);
4005 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4006                    S_IRUGO | S_IWUSR);
4007
4008 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4009 MODULE_DESCRIPTION("SCSI debug adapter driver");
4010 MODULE_LICENSE("GPL");
4011 MODULE_VERSION(SCSI_DEBUG_VERSION);
4012
4013 MODULE_PARM_DESC(add_host, "0..127 hosts allowed (def=1)");
4014 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4015 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4016 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4017 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs (def=8)");
4018 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4019 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4020 MODULE_PARM_DESC(dsense, "use descriptor sense format (def=0 -> fixed)");
4021 MODULE_PARM_DESC(every_nth, "timeout every nth command (def=0)");
4022 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4023 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4024 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4025 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4026 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4027 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4028 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4029 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4030 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
4031 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4032 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4033 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4034 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4035 MODULE_PARM_DESC(num_parts, "number of partitions (def=0)");
4036 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate (def=1)");
4037 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
4038 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4039 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4040 MODULE_PARM_DESC(ptype, "SCSI peripheral type (def=0[disk])");
4041 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4042 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate (def=6[SPC-4])");
4043 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4044 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4045 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4046 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4047 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
4048 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4049 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4050 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4051 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
4052
4053 static char sdebug_info[256];
4054
4055 static const char * scsi_debug_info(struct Scsi_Host * shp)
4056 {
4057         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4058                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4059                 scsi_debug_version_date, scsi_debug_dev_size_mb,
4060                 scsi_debug_opts);
4061         return sdebug_info;
4062 }
4063
4064 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4065 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4066 {
4067         char arr[16];
4068         int opts;
4069         int minLen = length > 15 ? 15 : length;
4070
4071         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4072                 return -EACCES;
4073         memcpy(arr, buffer, minLen);
4074         arr[minLen] = '\0';
4075         if (1 != sscanf(arr, "%d", &opts))
4076                 return -EINVAL;
4077         scsi_debug_opts = opts;
4078         if (scsi_debug_every_nth != 0)
4079                 atomic_set(&sdebug_cmnd_count, 0);
4080         return length;
4081 }
4082
4083 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4084  * same for each scsi_debug host (if more than one). Some of the counters
4085  * shown are not atomic, so they may be inaccurate on a busy system. */
4086 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4087 {
4088         int f, l;
4089         char b[32];
4090
4091         if (scsi_debug_every_nth > 0)
4092                 snprintf(b, sizeof(b), " (curr:%d)",
4093                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4094                                 atomic_read(&sdebug_a_tsf) :
4095                                 atomic_read(&sdebug_cmnd_count)));
4096         else
4097                 b[0] = '\0';
4098
4099         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4100                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4101                 "every_nth=%d%s\n"
4102                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4103                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4104                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4105                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4106                 "usec_in_jiffy=%lu\n",
4107                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4108                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4109                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4110                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4111                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4112                 sdebug_sectors_per, num_aborts, num_dev_resets,
4113                 num_target_resets, num_bus_resets, num_host_resets,
4114                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4115
4116         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4117         if (f != scsi_debug_max_queue) {
4118                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4119                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4120                            "queued_in_use_bm", f, l);
4121         }
4122         return 0;
4123 }
4124
4125 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4126 {
4127         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4128 }
4129 /* Returns -EBUSY if delay is being changed and commands are queued */
4130 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4131                            size_t count)
4132 {
4133         int delay, res;
4134
4135         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4136                 res = count;
4137                 if (scsi_debug_delay != delay) {
4138                         unsigned long iflags;
4139                         int k;
4140
4141                         spin_lock_irqsave(&queued_arr_lock, iflags);
4142                         k = find_first_bit(queued_in_use_bm,
4143                                            scsi_debug_max_queue);
4144                         if (k != scsi_debug_max_queue)
4145                                 res = -EBUSY;   /* have queued commands */
4146                         else {
4147                                 scsi_debug_delay = delay;
4148                                 scsi_debug_ndelay = 0;
4149                         }
4150                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4151                 }
4152                 return res;
4153         }
4154         return -EINVAL;
4155 }
4156 static DRIVER_ATTR_RW(delay);
4157
4158 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4159 {
4160         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4161 }
4162 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4163 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4164 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4165                            size_t count)
4166 {
4167         unsigned long iflags;
4168         int ndelay, res, k;
4169
4170         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4171             (ndelay >= 0) && (ndelay < 1000000000)) {
4172                 res = count;
4173                 if (scsi_debug_ndelay != ndelay) {
4174                         spin_lock_irqsave(&queued_arr_lock, iflags);
4175                         k = find_first_bit(queued_in_use_bm,
4176                                            scsi_debug_max_queue);
4177                         if (k != scsi_debug_max_queue)
4178                                 res = -EBUSY;   /* have queued commands */
4179                         else {
4180                                 scsi_debug_ndelay = ndelay;
4181                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4182                                                           : DEF_DELAY;
4183                         }
4184                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4185                 }
4186                 return res;
4187         }
4188         return -EINVAL;
4189 }
4190 static DRIVER_ATTR_RW(ndelay);
4191
4192 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4193 {
4194         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4195 }
4196
4197 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4198                           size_t count)
4199 {
4200         int opts;
4201         char work[20];
4202
4203         if (1 == sscanf(buf, "%10s", work)) {
4204                 if (0 == strncasecmp(work,"0x", 2)) {
4205                         if (1 == sscanf(&work[2], "%x", &opts))
4206                                 goto opts_done;
4207                 } else {
4208                         if (1 == sscanf(work, "%d", &opts))
4209                                 goto opts_done;
4210                 }
4211         }
4212         return -EINVAL;
4213 opts_done:
4214         scsi_debug_opts = opts;
4215         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4216                 sdebug_any_injecting_opt = true;
4217         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4218                 sdebug_any_injecting_opt = true;
4219         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4220                 sdebug_any_injecting_opt = true;
4221         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4222                 sdebug_any_injecting_opt = true;
4223         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4224                 sdebug_any_injecting_opt = true;
4225         atomic_set(&sdebug_cmnd_count, 0);
4226         atomic_set(&sdebug_a_tsf, 0);
4227         return count;
4228 }
4229 static DRIVER_ATTR_RW(opts);
4230
4231 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4232 {
4233         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4234 }
4235 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4236                            size_t count)
4237 {
4238         int n;
4239
4240         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4241                 scsi_debug_ptype = n;
4242                 return count;
4243         }
4244         return -EINVAL;
4245 }
4246 static DRIVER_ATTR_RW(ptype);
4247
4248 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4249 {
4250         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4251 }
4252 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4253                             size_t count)
4254 {
4255         int n;
4256
4257         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4258                 scsi_debug_dsense = n;
4259                 return count;
4260         }
4261         return -EINVAL;
4262 }
4263 static DRIVER_ATTR_RW(dsense);
4264
4265 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4266 {
4267         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4268 }
4269 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4270                              size_t count)
4271 {
4272         int n;
4273
4274         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4275                 n = (n > 0);
4276                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4277                 if (scsi_debug_fake_rw != n) {
4278                         if ((0 == n) && (NULL == fake_storep)) {
4279                                 unsigned long sz =
4280                                         (unsigned long)scsi_debug_dev_size_mb *
4281                                         1048576;
4282
4283                                 fake_storep = vmalloc(sz);
4284                                 if (NULL == fake_storep) {
4285                                         pr_err("%s: out of memory, 9\n",
4286                                                __func__);
4287                                         return -ENOMEM;
4288                                 }
4289                                 memset(fake_storep, 0, sz);
4290                         }
4291                         scsi_debug_fake_rw = n;
4292                 }
4293                 return count;
4294         }
4295         return -EINVAL;
4296 }
4297 static DRIVER_ATTR_RW(fake_rw);
4298
4299 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4300 {
4301         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4302 }
4303 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4304                               size_t count)
4305 {
4306         int n;
4307
4308         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4309                 scsi_debug_no_lun_0 = n;
4310                 return count;
4311         }
4312         return -EINVAL;
4313 }
4314 static DRIVER_ATTR_RW(no_lun_0);
4315
4316 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4317 {
4318         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4319 }
4320 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4321                               size_t count)
4322 {
4323         int n;
4324
4325         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4326                 scsi_debug_num_tgts = n;
4327                 sdebug_max_tgts_luns();
4328                 return count;
4329         }
4330         return -EINVAL;
4331 }
4332 static DRIVER_ATTR_RW(num_tgts);
4333
4334 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4335 {
4336         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4337 }
4338 static DRIVER_ATTR_RO(dev_size_mb);
4339
4340 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4341 {
4342         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4343 }
4344 static DRIVER_ATTR_RO(num_parts);
4345
4346 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4347 {
4348         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4349 }
4350 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4351                                size_t count)
4352 {
4353         int nth;
4354
4355         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4356                 scsi_debug_every_nth = nth;
4357                 atomic_set(&sdebug_cmnd_count, 0);
4358                 return count;
4359         }
4360         return -EINVAL;
4361 }
4362 static DRIVER_ATTR_RW(every_nth);
4363
4364 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4365 {
4366         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4367 }
4368 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4369                               size_t count)
4370 {
4371         int n;
4372
4373         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4374                 scsi_debug_max_luns = n;
4375                 sdebug_max_tgts_luns();
4376                 return count;
4377         }
4378         return -EINVAL;
4379 }
4380 static DRIVER_ATTR_RW(max_luns);
4381
4382 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4383 {
4384         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4385 }
4386 /* N.B. max_queue can be changed while there are queued commands. In-flight
4387  * commands beyond the new max_queue will still be completed. */
4388 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4389                                size_t count)
4390 {
4391         unsigned long iflags;
4392         int n, k;
4393
4394         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4395             (n <= SCSI_DEBUG_CANQUEUE)) {
4396                 spin_lock_irqsave(&queued_arr_lock, iflags);
4397                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4398                 scsi_debug_max_queue = n;
4399                 if (SCSI_DEBUG_CANQUEUE == k)
4400                         atomic_set(&retired_max_queue, 0);
4401                 else if (k >= n)
4402                         atomic_set(&retired_max_queue, k + 1);
4403                 else
4404                         atomic_set(&retired_max_queue, 0);
4405                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4406                 return count;
4407         }
4408         return -EINVAL;
4409 }
4410 static DRIVER_ATTR_RW(max_queue);
4411
4412 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4413 {
4414         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4415 }
4416 static DRIVER_ATTR_RO(no_uld);
4417
4418 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4419 {
4420         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4421 }
4422 static DRIVER_ATTR_RO(scsi_level);
4423
4424 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4425 {
4426         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4427 }
4428 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4429                                 size_t count)
4430 {
4431         int n;
4432         bool changed;
4433
4434         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4435                 changed = (scsi_debug_virtual_gb != n);
4436                 scsi_debug_virtual_gb = n;
4437                 sdebug_capacity = get_sdebug_capacity();
4438                 if (changed) {
4439                         struct sdebug_host_info *sdhp;
4440                         struct sdebug_dev_info *dp;
4441
4442                         spin_lock(&sdebug_host_list_lock);
4443                         list_for_each_entry(sdhp, &sdebug_host_list,
4444                                             host_list) {
4445                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4446                                                     dev_list) {
4447                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4448                                                 dp->uas_bm);
4449                                 }
4450                         }
4451                         spin_unlock(&sdebug_host_list_lock);
4452                 }
4453                 return count;
4454         }
4455         return -EINVAL;
4456 }
4457 static DRIVER_ATTR_RW(virtual_gb);
4458
4459 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4460 {
4461         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4462 }
4463
4464 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4465                               size_t count)
4466 {
4467         int delta_hosts;
4468
4469         if (sscanf(buf, "%d", &delta_hosts) != 1)
4470                 return -EINVAL;
4471         if (delta_hosts > 0) {
4472                 do {
4473                         sdebug_add_adapter();
4474                 } while (--delta_hosts);
4475         } else if (delta_hosts < 0) {
4476                 do {
4477                         sdebug_remove_adapter();
4478                 } while (++delta_hosts);
4479         }
4480         return count;
4481 }
4482 static DRIVER_ATTR_RW(add_host);
4483
4484 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4485 {
4486         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4487 }
4488 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4489                                     size_t count)
4490 {
4491         int n;
4492
4493         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4494                 scsi_debug_vpd_use_hostno = n;
4495                 return count;
4496         }
4497         return -EINVAL;
4498 }
4499 static DRIVER_ATTR_RW(vpd_use_hostno);
4500
4501 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4502 {
4503         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4504 }
4505 static DRIVER_ATTR_RO(sector_size);
4506
4507 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4508 {
4509         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4510 }
4511 static DRIVER_ATTR_RO(dix);
4512
4513 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4514 {
4515         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4516 }
4517 static DRIVER_ATTR_RO(dif);
4518
4519 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4520 {
4521         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4522 }
4523 static DRIVER_ATTR_RO(guard);
4524
4525 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4526 {
4527         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4528 }
4529 static DRIVER_ATTR_RO(ato);
4530
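/* Show the logical block provisioning map as a list of mapped LBA ranges;
 * when LBP is disabled the whole backing store is reported as mapped. */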
4531 static ssize_t map_show(struct device_driver *ddp, char *buf)
4532 {
4533         ssize_t count;
4534
4535         if (!scsi_debug_lbp())
4536                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4537                                  sdebug_store_sectors);
4538
4539         count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
4540
4541         buf[count++] = '\n';
4542         buf[count++] = 0;
4543
4544         return count;
4545 }
4546 static DRIVER_ATTR_RO(map);
4547
4548 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4549 {
4550         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4551 }
4552 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4553                                size_t count)
4554 {
4555         int n;
4556
4557         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4558                 scsi_debug_removable = (n > 0);
4559                 return count;
4560         }
4561         return -EINVAL;
4562 }
4563 static DRIVER_ATTR_RW(removable);
4564
4565 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4566 {
4567         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4568 }
4569 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4570 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4571                                size_t count)
4572 {
4573         int n, res;
4574
4575         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4576                 bool new_host_lock = (n > 0);
4577
4578                 res = count;
4579                 if (new_host_lock != scsi_debug_host_lock) {
4580                         unsigned long iflags;
4581                         int k;
4582
4583                         spin_lock_irqsave(&queued_arr_lock, iflags);
4584                         k = find_first_bit(queued_in_use_bm,
4585                                            scsi_debug_max_queue);
4586                         if (k != scsi_debug_max_queue)
4587                                 res = -EBUSY;   /* have queued commands */
4588                         else
4589                                 scsi_debug_host_lock = new_host_lock;
4590                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4591                 }
4592                 return res;
4593         }
4594         return -EINVAL;
4595 }
4596 static DRIVER_ATTR_RW(host_lock);
4597
4598 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4599 {
4600         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4601 }
4602 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4603                             size_t count)
4604 {
4605         int n;
4606
4607         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4608                 scsi_debug_strict = (n > 0);
4609                 return count;
4610         }
4611         return -EINVAL;
4612 }
4613 static DRIVER_ATTR_RW(strict);
4614
4615
4616 /* Note: The following array creates attribute files in the
4617    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4618    files (over those found in the /sys/module/scsi_debug/parameters
4619    directory) is that auxiliary actions can be triggered when an attribute
4620    is changed. For example see: add_host_store() above.
4621  */
4622
4623 static struct attribute *sdebug_drv_attrs[] = {
4624         &driver_attr_delay.attr,
4625         &driver_attr_opts.attr,
4626         &driver_attr_ptype.attr,
4627         &driver_attr_dsense.attr,
4628         &driver_attr_fake_rw.attr,
4629         &driver_attr_no_lun_0.attr,
4630         &driver_attr_num_tgts.attr,
4631         &driver_attr_dev_size_mb.attr,
4632         &driver_attr_num_parts.attr,
4633         &driver_attr_every_nth.attr,
4634         &driver_attr_max_luns.attr,
4635         &driver_attr_max_queue.attr,
4636         &driver_attr_no_uld.attr,
4637         &driver_attr_scsi_level.attr,
4638         &driver_attr_virtual_gb.attr,
4639         &driver_attr_add_host.attr,
4640         &driver_attr_vpd_use_hostno.attr,
4641         &driver_attr_sector_size.attr,
4642         &driver_attr_dix.attr,
4643         &driver_attr_dif.attr,
4644         &driver_attr_guard.attr,
4645         &driver_attr_ato.attr,
4646         &driver_attr_map.attr,
4647         &driver_attr_removable.attr,
4648         &driver_attr_host_lock.attr,
4649         &driver_attr_ndelay.attr,
4650         &driver_attr_strict.attr,
4651         NULL,
4652 };
4653 ATTRIBUTE_GROUPS(sdebug_drv);
4654
4655 static struct device *pseudo_primary;
4656
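/* Module initialization: validate parameters, size the ramdisk and its
 * optional DIF/provisioning stores, register the pseudo bus and driver,
 * then add the initial adapter(s). */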
4657 static int __init scsi_debug_init(void)
4658 {
4659         unsigned long sz;
4660         int host_to_add;
4661         int k;
4662         int ret;
4663
4664         atomic_set(&sdebug_cmnd_count, 0);
4665         atomic_set(&sdebug_completions, 0);
4666         atomic_set(&retired_max_queue, 0);
4667
4668         if (scsi_debug_ndelay >= 1000000000) {
4669                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4670                         __func__);
4671                 scsi_debug_ndelay = 0;
4672         } else if (scsi_debug_ndelay > 0)
4673                 scsi_debug_delay = DELAY_OVERRIDDEN;
4674
4675         switch (scsi_debug_sector_size) {
4676         case  512:
4677         case 1024:
4678         case 2048:
4679         case 4096:
4680                 break;
4681         default:
4682                 pr_err("%s: invalid sector_size %d\n", __func__,
4683                        scsi_debug_sector_size);
4684                 return -EINVAL;
4685         }
4686
4687         switch (scsi_debug_dif) {
4688
4689         case SD_DIF_TYPE0_PROTECTION:
4690         case SD_DIF_TYPE1_PROTECTION:
4691         case SD_DIF_TYPE2_PROTECTION:
4692         case SD_DIF_TYPE3_PROTECTION:
4693                 break;
4694
4695         default:
4696                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4697                 return -EINVAL;
4698         }
4699
4700         if (scsi_debug_guard > 1) {
4701                 pr_err("%s: guard must be 0 or 1\n", __func__);
4702                 return -EINVAL;
4703         }
4704
4705         if (scsi_debug_ato > 1) {
4706                 pr_err("%s: ato must be 0 or 1\n", __func__);
4707                 return -EINVAL;
4708         }
4709
4710         if (scsi_debug_physblk_exp > 15) {
4711                 pr_err("%s: invalid physblk_exp %u\n", __func__,
4712                        scsi_debug_physblk_exp);
4713                 return -EINVAL;
4714         }
4715
4716         if (scsi_debug_lowest_aligned > 0x3fff) {
4717                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4718                        scsi_debug_lowest_aligned);
4719                 return -EINVAL;
4720         }
4721
4722         if (scsi_debug_dev_size_mb < 1)
4723                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4724         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4725         sdebug_store_sectors = sz / scsi_debug_sector_size;
4726         sdebug_capacity = get_sdebug_capacity();
4727
4728         /* play around with geometry, don't waste too much on track 0 */
4729         sdebug_heads = 8;
4730         sdebug_sectors_per = 32;
4731         if (scsi_debug_dev_size_mb >= 256)
4732                 sdebug_heads = 64;
4733         else if (scsi_debug_dev_size_mb >= 16)
4734                 sdebug_heads = 32;
4735         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4736                                (sdebug_sectors_per * sdebug_heads);
4737         if (sdebug_cylinders_per >= 1024) {
4738                 /* other LLDs do this; implies >= 1GB ram disk ... */
4739                 sdebug_heads = 255;
4740                 sdebug_sectors_per = 63;
4741                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4742                                (sdebug_sectors_per * sdebug_heads);
4743         }
4744
4745         if (0 == scsi_debug_fake_rw) {
4746                 fake_storep = vmalloc(sz);
4747                 if (NULL == fake_storep) {
4748                         pr_err("%s: out of memory, 1\n", __func__);
4749                         return -ENOMEM;
4750                 }
4751                 memset(fake_storep, 0, sz);
4752                 if (scsi_debug_num_parts > 0)
4753                         sdebug_build_parts(fake_storep, sz);
4754         }
4755
4756         if (scsi_debug_dix) {
4757                 int dif_size;
4758
4759                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4760                 dif_storep = vmalloc(dif_size);
4761
4762                 pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4763                         dif_storep);
4764
4765                 if (dif_storep == NULL) {
4766                         pr_err("%s: out of mem. (DIX)\n", __func__);
4767                         ret = -ENOMEM;
4768                         goto free_vm;
4769                 }
4770
4771                 memset(dif_storep, 0xff, dif_size);
4772         }
4773
4774         /* Logical Block Provisioning */
4775         if (scsi_debug_lbp()) {
4776                 scsi_debug_unmap_max_blocks =
4777                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4778
4779                 scsi_debug_unmap_max_desc =
4780                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4781
4782                 scsi_debug_unmap_granularity =
4783                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4784
4785                 if (scsi_debug_unmap_alignment &&
4786                     scsi_debug_unmap_granularity <=
4787                     scsi_debug_unmap_alignment) {
4788                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
4789                                __func__);
4790                         return -EINVAL;
4791                 }
4792
4793                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4794                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4795
4796                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4797
4798                 if (map_storep == NULL) {
4799                         pr_err("%s: out of mem. (MAP)\n", __func__);
4800                         ret = -ENOMEM;
4801                         goto free_vm;
4802                 }
4803
4804                 bitmap_zero(map_storep, map_size);
4805
4806                 /* Map first 1KB for partition table */
4807                 if (scsi_debug_num_parts)
4808                         map_region(0, 2);
4809         }
4810
4811         pseudo_primary = root_device_register("pseudo_0");
4812         if (IS_ERR(pseudo_primary)) {
4813                 pr_warn("%s: root_device_register() error\n", __func__);
4814                 ret = PTR_ERR(pseudo_primary);
4815                 goto free_vm;
4816         }
4817         ret = bus_register(&pseudo_lld_bus);
4818         if (ret < 0) {
4819                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4820                 goto dev_unreg;
4821         }
4822         ret = driver_register(&sdebug_driverfs_driver);
4823         if (ret < 0) {
4824                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
4825                 goto bus_unreg;
4826         }
4827
4828         host_to_add = scsi_debug_add_host;
4829         scsi_debug_add_host = 0;
4830
4831         for (k = 0; k < host_to_add; k++) {
4832                 if (sdebug_add_adapter()) {
4833                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
4834                                 __func__, k);
4835                         break;
4836                 }
4837         }
4838
4839         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4840                 pr_info("%s: built %d host(s)\n", __func__,
4841                         scsi_debug_add_host);
4842         }
4843         return 0;
4844
4845 bus_unreg:
4846         bus_unregister(&pseudo_lld_bus);
4847 dev_unreg:
4848         root_device_unregister(pseudo_primary);
4849 free_vm:
4850         if (map_storep)
4851                 vfree(map_storep);
4852         if (dif_storep)
4853                 vfree(dif_storep);
4854         vfree(fake_storep);
4855
4856         return ret;
4857 }
4858
4859 static void __exit scsi_debug_exit(void)
4860 {
4861         int k = scsi_debug_add_host;
4862
4863         stop_all_queued();
4864         free_all_queued();
4865         for (; k; k--)
4866                 sdebug_remove_adapter();
4867         driver_unregister(&sdebug_driverfs_driver);
4868         bus_unregister(&pseudo_lld_bus);
4869         root_device_unregister(pseudo_primary);
4870
4871         if (dif_storep)
4872                 vfree(dif_storep);
4873
4874         vfree(fake_storep);
4875 }
4876
4877 device_initcall(scsi_debug_init);
4878 module_exit(scsi_debug_exit);
4879
4880 static void sdebug_release_adapter(struct device * dev)
4881 {
4882         struct sdebug_host_info *sdbg_host;
4883
4884         sdbg_host = to_sdebug_host(dev);
4885         kfree(sdbg_host);
4886 }
4887
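/* Allocate a pseudo adapter with num_tgts * max_luns device entries and
 * register it on the pseudo bus; sdebug_driver_probe() then creates and
 * scans the corresponding Scsi_Host. */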
4888 static int sdebug_add_adapter(void)
4889 {
4890         int k, devs_per_host;
4891         int error = 0;
4892         struct sdebug_host_info *sdbg_host;
4893         struct sdebug_dev_info *sdbg_devinfo, *tmp;
4894
4895         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
4896         if (NULL == sdbg_host) {
4897                 printk(KERN_ERR "%s: out of memory at line %d\n",
4898                        __func__, __LINE__);
4899                 return -ENOMEM;
4900         }
4901
4902         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
4903
4904         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
4905         for (k = 0; k < devs_per_host; k++) {
4906                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
4907                 if (!sdbg_devinfo) {
4908                         printk(KERN_ERR "%s: out of memory at line %d\n",
4909                                __func__, __LINE__);
4910                         error = -ENOMEM;
4911                         goto clean;
4912                 }
4913         }
4914
4915         spin_lock(&sdebug_host_list_lock);
4916         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
4917         spin_unlock(&sdebug_host_list_lock);
4918
4919         sdbg_host->dev.bus = &pseudo_lld_bus;
4920         sdbg_host->dev.parent = pseudo_primary;
4921         sdbg_host->dev.release = &sdebug_release_adapter;
4922         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
4923
4924         error = device_register(&sdbg_host->dev);
4925
4926         if (error)
4927                 goto clean;
4928
4929         ++scsi_debug_add_host;
4930         return error;
4931
4932 clean:
4933         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4934                                  dev_list) {
4935                 list_del(&sdbg_devinfo->dev_list);
4936                 kfree(sdbg_devinfo);
4937         }
4938
4939         kfree(sdbg_host);
4940         return error;
4941 }
4942
4943 static void sdebug_remove_adapter(void)
4944 {
4945         struct sdebug_host_info * sdbg_host = NULL;
4946
4947         spin_lock(&sdebug_host_list_lock);
4948         if (!list_empty(&sdebug_host_list)) {
4949                 sdbg_host = list_entry(sdebug_host_list.prev,
4950                                        struct sdebug_host_info, host_list);
4951                 list_del(&sdbg_host->host_list);
4952         }
4953         spin_unlock(&sdebug_host_list_lock);
4954
4955         if (!sdbg_host)
4956                 return;
4957
4958         device_unregister(&sdbg_host->dev);
4959         --scsi_debug_add_host;
4960 }
4961
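/* ->change_queue_depth handler: clamp the requested depth to
 * [1, SCSI_DEBUG_CANQUEUE + 10] and apply it via scsi_change_queue_depth(). */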
4962 static int
4963 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4964 {
4965         int num_in_q = 0;
4966         unsigned long iflags;
4967         struct sdebug_dev_info *devip;
4968
4969         spin_lock_irqsave(&queued_arr_lock, iflags);
4970         devip = (struct sdebug_dev_info *)sdev->hostdata;
4971         if (NULL == devip) {
4972                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4973                 return  -ENODEV;
4974         }
4975         num_in_q = atomic_read(&devip->num_in_q);
4976         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4977
4978         if (qdepth < 1)
4979                 qdepth = 1;
4980         /* allow qdepth to exceed the host's queued_arr capacity, for testing */
4981         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
4982                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
4983         scsi_change_queue_depth(sdev, qdepth);
4984
4985         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4986                 sdev_printk(KERN_INFO, sdev,
4987                             "%s: qdepth=%d, num_in_q=%d\n",
4988                             __func__, qdepth, num_in_q);
4989         }
4990         return sdev->queue_depth;
4991 }
4992
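/* Called when every_nth is non-zero: count commands and, on every nth one,
 * either drop the command (to simulate a timeout) or mark the per-command
 * private data so a later response injects the configured error. */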
4993 static int
4994 check_inject(struct scsi_cmnd *scp)
4995 {
4996         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
4997
4998         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
4999
5000         if (atomic_inc_return(&sdebug_cmnd_count) >=
5001             abs(scsi_debug_every_nth)) {
5002                 atomic_set(&sdebug_cmnd_count, 0);
5003                 if (scsi_debug_every_nth < -1)
5004                         scsi_debug_every_nth = -1;
5005                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5006                         return 1; /* ignore command causing timeout */
5007                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5008                          scsi_medium_access_command(scp))
5009                         return 1; /* time out reads and writes */
5010                 if (sdebug_any_injecting_opt) {
5011                         int opts = scsi_debug_opts;
5012
5013                         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5014                                 ep->inj_recovered = true;
5015                         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5016                                 ep->inj_transport = true;
5017                         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5018                                 ep->inj_dif = true;
5019                         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5020                                 ep->inj_dix = true;
5021                         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5022                                 ep->inj_short = true;
5023                 }
5024         }
5025         return 0;
5026 }
5027
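/* Central command dispatcher: map the CDB opcode (and any service action)
 * to an opcode_info_arr entry, run readiness/strict-CDB checks, call the
 * entry's resp_* handler and hand the result to schedule_resp(). */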
5028 static int
5029 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5030 {
5031         u8 sdeb_i;
5032         struct scsi_device *sdp = scp->device;
5033         const struct opcode_info_t *oip;
5034         const struct opcode_info_t *r_oip;
5035         struct sdebug_dev_info *devip;
5036         u8 *cmd = scp->cmnd;
5037         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5038         int k, na;
5039         int errsts = 0;
5040         int errsts_no_connect = DID_NO_CONNECT << 16;
5041         u32 flags;
5042         u16 sa;
5043         u8 opcode = cmd[0];
5044         bool has_wlun_rl;
5045         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5046
5047         scsi_set_resid(scp, 0);
5048         if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5049                 char b[120];
5050                 int n, len, sb;
5051
5052                 len = scp->cmd_len;
5053                 sb = (int)sizeof(b);
5054                 if (len > 32)
5055                         strcpy(b, "too long, over 32 bytes");
5056                 else {
5057                         for (k = 0, n = 0; k < len && n < sb; ++k)
5058                                 n += scnprintf(b + n, sb - n, "%02x ",
5059                                                (u32)cmd[k]);
5060                 }
5061                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5062         }
5063         has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5064         if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5065                 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5066
5067         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5068         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5069         devip = (struct sdebug_dev_info *)sdp->hostdata;
5070         if (!devip) {
5071                 devip = devInfoReg(sdp);
5072                 if (NULL == devip)
5073                         return schedule_resp(scp, NULL, errsts_no_connect, 0);
5074         }
5075         na = oip->num_attached;
5076         r_pfp = oip->pfp;
5077         if (na) {       /* multiple commands with this opcode */
5078                 r_oip = oip;
5079                 if (FF_SA & r_oip->flags) {
5080                         if (F_SA_LOW & oip->flags)
5081                                 sa = 0x1f & cmd[1];
5082                         else
5083                                 sa = get_unaligned_be16(cmd + 8);
5084                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5085                                 if (opcode == oip->opcode && sa == oip->sa)
5086                                         break;
5087                         }
5088                 } else {   /* no service action, so only check the opcode */
5089                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5090                                 if (opcode == oip->opcode)
5091                                         break;
5092                         }
5093                 }
5094                 if (k > na) {
5095                         if (F_SA_LOW & r_oip->flags)
5096                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5097                         else if (F_SA_HIGH & r_oip->flags)
5098                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5099                         else
5100                                 mk_sense_invalid_opcode(scp);
5101                         goto check_cond;
5102                 }
5103         }       /* else (when na==0) we assume the oip is a match */
5104         flags = oip->flags;
5105         if (F_INV_OP & flags) {
5106                 mk_sense_invalid_opcode(scp);
5107                 goto check_cond;
5108         }
5109         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5110                 if (debug)
5111                         sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5112                                     "0x%x not supported for wlun\n", opcode);
5113                 mk_sense_invalid_opcode(scp);
5114                 goto check_cond;
5115         }
5116         if (scsi_debug_strict) {        /* check cdb against mask */
5117                 u8 rem;
5118                 int j;
5119
5120                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5121                         rem = ~oip->len_mask[k] & cmd[k];
5122                         if (rem) {
5123                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5124                                         if (0x80 & rem)
5125                                                 break;
5126                                 }
5127                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5128                                 goto check_cond;
5129                         }
5130                 }
5131         }
5132         if (!(F_SKIP_UA & flags) &&
5133             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5134                 errsts = check_readiness(scp, UAS_ONLY, devip);
5135                 if (errsts)
5136                         goto check_cond;
5137         }
5138         if ((F_M_ACCESS & flags) && devip->stopped) {
5139                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5140                 if (debug)
5141                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5142                                     "%s\n", my_name, "initializing command "
5143                                     "required");
5144                 errsts = check_condition_result;
5145                 goto fini;
5146         }
5147         if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5148                 goto fini;
5149         if (scsi_debug_every_nth) {
5150                 if (check_inject(scp))
5151                         return 0;       /* ignore command: make trouble */
5152         }
5153         if (oip->pfp)   /* if this command has a resp_* function, call it */
5154                 errsts = oip->pfp(scp, devip);
5155         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5156                 errsts = r_pfp(scp, devip);
5157
5158 fini:
5159         return schedule_resp(scp, devip, errsts,
5160                              ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5161 check_cond:
5162         return schedule_resp(scp, devip, check_condition_result, 0);
5163 }
5164
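/* ->queuecommand entry point: optionally wrap scsi_debug_queuecommand()
 * in the host lock when the host_lock parameter is set. */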
5165 static int
5166 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5167 {
5168         if (scsi_debug_host_lock) {
5169                 unsigned long iflags;
5170                 int rc;
5171
5172                 spin_lock_irqsave(shost->host_lock, iflags);
5173                 rc = scsi_debug_queuecommand(cmd);
5174                 spin_unlock_irqrestore(shost->host_lock, iflags);
5175                 return rc;
5176         } else
5177                 return scsi_debug_queuecommand(cmd);
5178 }
5179
5180 static struct scsi_host_template sdebug_driver_template = {
5181         .show_info =            scsi_debug_show_info,
5182         .write_info =           scsi_debug_write_info,
5183         .proc_name =            sdebug_proc_name,
5184         .name =                 "SCSI DEBUG",
5185         .info =                 scsi_debug_info,
5186         .slave_alloc =          scsi_debug_slave_alloc,
5187         .slave_configure =      scsi_debug_slave_configure,
5188         .slave_destroy =        scsi_debug_slave_destroy,
5189         .ioctl =                scsi_debug_ioctl,
5190         .queuecommand =         sdebug_queuecommand_lock_or_not,
5191         .change_queue_depth =   sdebug_change_qdepth,
5192         .eh_abort_handler =     scsi_debug_abort,
5193         .eh_device_reset_handler = scsi_debug_device_reset,
5194         .eh_target_reset_handler = scsi_debug_target_reset,
5195         .eh_bus_reset_handler = scsi_debug_bus_reset,
5196         .eh_host_reset_handler = scsi_debug_host_reset,
5197         .can_queue =            SCSI_DEBUG_CANQUEUE,
5198         .this_id =              7,
5199         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
5200         .cmd_per_lun =          DEF_CMD_PER_LUN,
5201         .max_sectors =          -1U,
5202         .use_clustering =       DISABLE_CLUSTERING,
5203         .module =               THIS_MODULE,
5204         .track_queue_depth =    1,
5205         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5206 };
5207
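/* Bus probe: allocate a Scsi_Host from sdebug_driver_template, configure
 * DIF/DIX protection and the guard type, then add and scan the host. */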
static int sdebug_driver_probe(struct device *dev)
{
        int error = 0;
        int opts;
        struct sdebug_host_info *sdbg_host;
        struct Scsi_Host *hpnt;
        int host_prot;

        sdbg_host = to_sdebug_host(dev);

        sdebug_driver_template.can_queue = scsi_debug_max_queue;
        if (scsi_debug_clustering)
                sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
        /* hostdata only needs to hold a pointer back to sdbg_host */
        hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
        if (NULL == hpnt) {
                pr_err("%s: scsi_host_alloc failed\n", __func__);
                error = -ENODEV;
                return error;
        }

        sdbg_host->shost = hpnt;
        *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
        /* leave room for the host's own id when it falls inside the target range */
        if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
                hpnt->max_id = scsi_debug_num_tgts + 1;
        else
                hpnt->max_id = scsi_debug_num_tgts;
        hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* = scsi_debug_max_luns; */

        /* translate the dif/dix module options into SHOST protection capabilities */
        host_prot = 0;

        switch (scsi_debug_dif) {

        case SD_DIF_TYPE1_PROTECTION:
                host_prot = SHOST_DIF_TYPE1_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE1_PROTECTION;
                break;

        case SD_DIF_TYPE2_PROTECTION:
                host_prot = SHOST_DIF_TYPE2_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE2_PROTECTION;
                break;

        case SD_DIF_TYPE3_PROTECTION:
                host_prot = SHOST_DIF_TYPE3_PROTECTION;
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE3_PROTECTION;
                break;

        default:
                if (scsi_debug_dix)
                        host_prot |= SHOST_DIX_TYPE0_PROTECTION;
                break;
        }

        scsi_host_set_prot(hpnt, host_prot);

        printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
               (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
               (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
               (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
               (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
               (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
               (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
               (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

        if (scsi_debug_guard == 1)
                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
        else
                scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

        /* remember whether any error-injection option was requested at load time */
        opts = scsi_debug_opts;
        if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
                sdebug_any_injecting_opt = true;
        else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
                sdebug_any_injecting_opt = true;
        else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
                sdebug_any_injecting_opt = true;
        else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
                sdebug_any_injecting_opt = true;
        else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
                sdebug_any_injecting_opt = true;

        error = scsi_add_host(hpnt, &sdbg_host->dev);
        if (error) {
                printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
                error = -ENODEV;
                scsi_host_put(hpnt);
        } else
                scsi_scan_host(hpnt);

        return error;
}

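/* undo sdebug_driver_probe(): unregister the host and free per-device state */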
static int sdebug_driver_remove(struct device *dev)
{
        struct sdebug_host_info *sdbg_host;
        struct sdebug_dev_info *sdbg_devinfo, *tmp;

        sdbg_host = to_sdebug_host(dev);

        if (!sdbg_host) {
                printk(KERN_ERR "%s: Unable to locate host info\n",
                       __func__);
                return -ENODEV;
        }

        scsi_remove_host(sdbg_host->shost);

        /* free the per-device state accumulated while the host was registered */
        list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
                                 dev_list) {
                list_del(&sdbg_devinfo->dev_list);
                kfree(sdbg_devinfo);
        }

        scsi_host_put(sdbg_host->shost);
        return 0;
}

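/* any driver registered on the pseudo bus matches any of its devices */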
static int pseudo_lld_bus_match(struct device *dev,
                                struct device_driver *dev_driver)
{
        return 1;
}

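/* the bus that simulated host devices are registered on */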
static struct bus_type pseudo_lld_bus = {
        .name = "pseudo",
        .match = pseudo_lld_bus_match,
        .probe = sdebug_driver_probe,
        .remove = sdebug_driver_remove,
        .drv_groups = sdebug_drv_groups,
};