Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[linux-drm-fsl-dcu.git] / drivers / vhost / scsi.c
1 /*******************************************************************************
2  * Vhost kernel TCM fabric driver for virtio SCSI initiators
3  *
4  * (C) Copyright 2010-2013 Datera, Inc.
5  * (C) Copyright 2010-2012 IBM Corp.
6  *
7  * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8  *
9  * Authors: Nicholas A. Bellinger <nab@daterainc.com>
10  *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  *
22  ****************************************************************************/
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <generated/utsrelease.h>
27 #include <linux/utsname.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/kthread.h>
31 #include <linux/types.h>
32 #include <linux/string.h>
33 #include <linux/configfs.h>
34 #include <linux/ctype.h>
35 #include <linux/compat.h>
36 #include <linux/eventfd.h>
37 #include <linux/fs.h>
38 #include <linux/miscdevice.h>
39 #include <asm/unaligned.h>
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_tcq.h>
42 #include <target/target_core_base.h>
43 #include <target/target_core_fabric.h>
44 #include <target/target_core_fabric_configfs.h>
45 #include <target/target_core_configfs.h>
46 #include <target/configfs_macros.h>
47 #include <linux/vhost.h>
48 #include <linux/virtio_scsi.h>
49 #include <linux/llist.h>
50 #include <linux/bitmap.h>
51 #include <linux/percpu_ida.h>
52
53 #include "vhost.h"
54
55 #define TCM_VHOST_VERSION  "v0.1"
56 #define TCM_VHOST_NAMELEN 256
57 #define TCM_VHOST_MAX_CDB_SIZE 32
58 #define TCM_VHOST_DEFAULT_TAGS 256
59 #define TCM_VHOST_PREALLOC_SGLS 2048
60 #define TCM_VHOST_PREALLOC_PAGES 2048
61
62 struct vhost_scsi_inflight {
63         /* Wait for the flush operation to finish */
64         struct completion comp;
65         /* Refcount for the inflight reqs */
66         struct kref kref;
67 };
68
69 struct tcm_vhost_cmd {
70         /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
71         int tvc_vq_desc;
72         /* virtio-scsi initiator task attribute */
73         int tvc_task_attr;
74         /* virtio-scsi initiator data direction */
75         enum dma_data_direction tvc_data_direction;
76         /* Expected data transfer length from virtio-scsi header */
77         u32 tvc_exp_data_len;
78         /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
79         u64 tvc_tag;
80         /* The number of scatterlists associated with this cmd */
81         u32 tvc_sgl_count;
82         /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
83         u32 tvc_lun;
84         /* Pointer to the SGL formatted memory from virtio-scsi */
85         struct scatterlist *tvc_sgl;
86         struct page **tvc_upages;
87         /* Pointer to response */
88         struct virtio_scsi_cmd_resp __user *tvc_resp;
89         /* Pointer to vhost_scsi for our device */
90         struct vhost_scsi *tvc_vhost;
91         /* Pointer to vhost_virtqueue for the cmd */
92         struct vhost_virtqueue *tvc_vq;
93         /* Pointer to vhost nexus memory */
94         struct tcm_vhost_nexus *tvc_nexus;
95         /* The TCM I/O descriptor that is accessed via container_of() */
96         struct se_cmd tvc_se_cmd;
97         /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
98         struct work_struct work;
99         /* Copy of the incoming SCSI command descriptor block (CDB) */
100         unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
101         /* Sense buffer that will be mapped into outgoing status */
102         unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
103         /* Completed commands list, serviced from vhost worker thread */
104         struct llist_node tvc_completion_list;
105         /* Used to track inflight cmd */
106         struct vhost_scsi_inflight *inflight;
107 };
108
109 struct tcm_vhost_nexus {
110         /* Pointer to TCM session for I_T Nexus */
111         struct se_session *tvn_se_sess;
112 };
113
114 struct tcm_vhost_nacl {
115         /* Binary World Wide unique Port Name for Vhost Initiator port */
116         u64 iport_wwpn;
117         /* ASCII formatted WWPN for Sas Initiator port */
118         char iport_name[TCM_VHOST_NAMELEN];
119         /* Returned by tcm_vhost_make_nodeacl() */
120         struct se_node_acl se_node_acl;
121 };
122
123 struct tcm_vhost_tpg {
124         /* Vhost port target portal group tag for TCM */
125         u16 tport_tpgt;
126         /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
127         int tv_tpg_port_count;
128         /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
129         int tv_tpg_vhost_count;
130         /* list for tcm_vhost_list */
131         struct list_head tv_tpg_list;
132         /* Used to protect access for tpg_nexus */
133         struct mutex tv_tpg_mutex;
134         /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
135         struct tcm_vhost_nexus *tpg_nexus;
136         /* Pointer back to tcm_vhost_tport */
137         struct tcm_vhost_tport *tport;
138         /* Returned by tcm_vhost_make_tpg() */
139         struct se_portal_group se_tpg;
140         /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
141         struct vhost_scsi *vhost_scsi;
142 };
143
144 struct tcm_vhost_tport {
145         /* SCSI protocol the tport is providing */
146         u8 tport_proto_id;
147         /* Binary World Wide unique Port Name for Vhost Target port */
148         u64 tport_wwpn;
149         /* ASCII formatted WWPN for Vhost Target port */
150         char tport_name[TCM_VHOST_NAMELEN];
151         /* Returned by tcm_vhost_make_tport() */
152         struct se_wwn tport_wwn;
153 };
154
155 struct tcm_vhost_evt {
156         /* event to be sent to guest */
157         struct virtio_scsi_event event;
158         /* event list, serviced from vhost worker thread */
159         struct llist_node list;
160 };
161
162 enum {
163         VHOST_SCSI_VQ_CTL = 0,
164         VHOST_SCSI_VQ_EVT = 1,
165         VHOST_SCSI_VQ_IO = 2,
166 };
167
168 enum {
169         VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
170 };
171
172 #define VHOST_SCSI_MAX_TARGET   256
173 #define VHOST_SCSI_MAX_VQ       128
174 #define VHOST_SCSI_MAX_EVENT    128
175
176 struct vhost_scsi_virtqueue {
177         struct vhost_virtqueue vq;
178         /*
179          * Reference counting for inflight reqs, used for flush operation. At
180          * each time, one reference tracks new commands submitted, while we
181          * wait for another one to reach 0.
182          */
183         struct vhost_scsi_inflight inflights[2];
184         /*
185          * Indicate current inflight in use, protected by vq->mutex.
186          * Writers must also take dev mutex and flush under it.
187          */
188         int inflight_idx;
189 };
190
191 struct vhost_scsi {
192         /* Protected by vhost_scsi->dev.mutex */
193         struct tcm_vhost_tpg **vs_tpg;
194         char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
195
196         struct vhost_dev dev;
197         struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];
198
199         struct vhost_work vs_completion_work; /* cmd completion work item */
200         struct llist_head vs_completion_list; /* cmd completion queue */
201
202         struct vhost_work vs_event_work; /* evt injection work item */
203         struct llist_head vs_event_list; /* evt injection queue */
204
205         bool vs_events_missed; /* any missed events, protected by vq->mutex */
206         int vs_events_nr; /* num of pending events, protected by vq->mutex */
207 };
208
209 /* Local pointer to allocated TCM configfs fabric module */
210 static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
211
212 static struct workqueue_struct *tcm_vhost_workqueue;
213
214 /* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
215 static DEFINE_MUTEX(tcm_vhost_mutex);
216 static LIST_HEAD(tcm_vhost_list);
217
218 static int iov_num_pages(struct iovec *iov)
219 {
220         return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
221                ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
222 }
223
224 static void tcm_vhost_done_inflight(struct kref *kref)
225 {
226         struct vhost_scsi_inflight *inflight;
227
228         inflight = container_of(kref, struct vhost_scsi_inflight, kref);
229         complete(&inflight->comp);
230 }
231
232 static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
233                                     struct vhost_scsi_inflight *old_inflight[])
234 {
235         struct vhost_scsi_inflight *new_inflight;
236         struct vhost_virtqueue *vq;
237         int idx, i;
238
239         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
240                 vq = &vs->vqs[i].vq;
241
242                 mutex_lock(&vq->mutex);
243
244                 /* store old infight */
245                 idx = vs->vqs[i].inflight_idx;
246                 if (old_inflight)
247                         old_inflight[i] = &vs->vqs[i].inflights[idx];
248
249                 /* setup new infight */
250                 vs->vqs[i].inflight_idx = idx ^ 1;
251                 new_inflight = &vs->vqs[i].inflights[idx ^ 1];
252                 kref_init(&new_inflight->kref);
253                 init_completion(&new_inflight->comp);
254
255                 mutex_unlock(&vq->mutex);
256         }
257 }
258
259 static struct vhost_scsi_inflight *
260 tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
261 {
262         struct vhost_scsi_inflight *inflight;
263         struct vhost_scsi_virtqueue *svq;
264
265         svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
266         inflight = &svq->inflights[svq->inflight_idx];
267         kref_get(&inflight->kref);
268
269         return inflight;
270 }
271
272 static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
273 {
274         kref_put(&inflight->kref, tcm_vhost_done_inflight);
275 }
276
277 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
278 {
279         return 1;
280 }
281
282 static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
283 {
284         return 0;
285 }
286
287 static char *tcm_vhost_get_fabric_name(void)
288 {
289         return "vhost";
290 }
291
292 static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
293 {
294         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
295                                 struct tcm_vhost_tpg, se_tpg);
296         struct tcm_vhost_tport *tport = tpg->tport;
297
298         switch (tport->tport_proto_id) {
299         case SCSI_PROTOCOL_SAS:
300                 return sas_get_fabric_proto_ident(se_tpg);
301         case SCSI_PROTOCOL_FCP:
302                 return fc_get_fabric_proto_ident(se_tpg);
303         case SCSI_PROTOCOL_ISCSI:
304                 return iscsi_get_fabric_proto_ident(se_tpg);
305         default:
306                 pr_err("Unknown tport_proto_id: 0x%02x, using"
307                         " SAS emulation\n", tport->tport_proto_id);
308                 break;
309         }
310
311         return sas_get_fabric_proto_ident(se_tpg);
312 }
313
314 static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
315 {
316         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
317                                 struct tcm_vhost_tpg, se_tpg);
318         struct tcm_vhost_tport *tport = tpg->tport;
319
320         return &tport->tport_name[0];
321 }
322
323 static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
324 {
325         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
326                                 struct tcm_vhost_tpg, se_tpg);
327         return tpg->tport_tpgt;
328 }
329
330 static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
331 {
332         return 1;
333 }
334
335 static u32
336 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
337                               struct se_node_acl *se_nacl,
338                               struct t10_pr_registration *pr_reg,
339                               int *format_code,
340                               unsigned char *buf)
341 {
342         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
343                                 struct tcm_vhost_tpg, se_tpg);
344         struct tcm_vhost_tport *tport = tpg->tport;
345
346         switch (tport->tport_proto_id) {
347         case SCSI_PROTOCOL_SAS:
348                 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
349                                         format_code, buf);
350         case SCSI_PROTOCOL_FCP:
351                 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
352                                         format_code, buf);
353         case SCSI_PROTOCOL_ISCSI:
354                 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
355                                         format_code, buf);
356         default:
357                 pr_err("Unknown tport_proto_id: 0x%02x, using"
358                         " SAS emulation\n", tport->tport_proto_id);
359                 break;
360         }
361
362         return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
363                         format_code, buf);
364 }
365
366 static u32
367 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
368                                   struct se_node_acl *se_nacl,
369                                   struct t10_pr_registration *pr_reg,
370                                   int *format_code)
371 {
372         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
373                                 struct tcm_vhost_tpg, se_tpg);
374         struct tcm_vhost_tport *tport = tpg->tport;
375
376         switch (tport->tport_proto_id) {
377         case SCSI_PROTOCOL_SAS:
378                 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
379                                         format_code);
380         case SCSI_PROTOCOL_FCP:
381                 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
382                                         format_code);
383         case SCSI_PROTOCOL_ISCSI:
384                 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
385                                         format_code);
386         default:
387                 pr_err("Unknown tport_proto_id: 0x%02x, using"
388                         " SAS emulation\n", tport->tport_proto_id);
389                 break;
390         }
391
392         return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
393                         format_code);
394 }
395
396 static char *
397 tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
398                                     const char *buf,
399                                     u32 *out_tid_len,
400                                     char **port_nexus_ptr)
401 {
402         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
403                                 struct tcm_vhost_tpg, se_tpg);
404         struct tcm_vhost_tport *tport = tpg->tport;
405
406         switch (tport->tport_proto_id) {
407         case SCSI_PROTOCOL_SAS:
408                 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
409                                         port_nexus_ptr);
410         case SCSI_PROTOCOL_FCP:
411                 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
412                                         port_nexus_ptr);
413         case SCSI_PROTOCOL_ISCSI:
414                 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
415                                         port_nexus_ptr);
416         default:
417                 pr_err("Unknown tport_proto_id: 0x%02x, using"
418                         " SAS emulation\n", tport->tport_proto_id);
419                 break;
420         }
421
422         return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
423                         port_nexus_ptr);
424 }
425
426 static struct se_node_acl *
427 tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
428 {
429         struct tcm_vhost_nacl *nacl;
430
431         nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
432         if (!nacl) {
433                 pr_err("Unable to allocate struct tcm_vhost_nacl\n");
434                 return NULL;
435         }
436
437         return &nacl->se_node_acl;
438 }
439
440 static void
441 tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
442                              struct se_node_acl *se_nacl)
443 {
444         struct tcm_vhost_nacl *nacl = container_of(se_nacl,
445                         struct tcm_vhost_nacl, se_node_acl);
446         kfree(nacl);
447 }
448
449 static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
450 {
451         return 1;
452 }
453
454 static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
455 {
456         struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
457                                 struct tcm_vhost_cmd, tvc_se_cmd);
458         struct se_session *se_sess = se_cmd->se_sess;
459
460         if (tv_cmd->tvc_sgl_count) {
461                 u32 i;
462                 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
463                         put_page(sg_page(&tv_cmd->tvc_sgl[i]));
464         }
465
466         tcm_vhost_put_inflight(tv_cmd->inflight);
467         percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
468 }
469
470 static int tcm_vhost_shutdown_session(struct se_session *se_sess)
471 {
472         return 0;
473 }
474
475 static void tcm_vhost_close_session(struct se_session *se_sess)
476 {
477         return;
478 }
479
480 static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
481 {
482         return 0;
483 }
484
485 static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
486 {
487         /* Go ahead and process the write immediately */
488         target_execute_cmd(se_cmd);
489         return 0;
490 }
491
492 static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
493 {
494         return 0;
495 }
496
497 static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
498 {
499         return;
500 }
501
502 static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
503 {
504         return 0;
505 }
506
507 static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
508 {
509         return 0;
510 }
511
512 static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
513 {
514         struct vhost_scsi *vs = cmd->tvc_vhost;
515
516         llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
517
518         vhost_work_queue(&vs->dev, &vs->vs_completion_work);
519 }
520
521 static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
522 {
523         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
524                                 struct tcm_vhost_cmd, tvc_se_cmd);
525         vhost_scsi_complete_cmd(cmd);
526         return 0;
527 }
528
529 static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
530 {
531         struct tcm_vhost_cmd *cmd = container_of(se_cmd,
532                                 struct tcm_vhost_cmd, tvc_se_cmd);
533         vhost_scsi_complete_cmd(cmd);
534         return 0;
535 }
536
537 static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
538 {
539         return;
540 }
541
542 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
543 {
544         vs->vs_events_nr--;
545         kfree(evt);
546 }
547
548 static struct tcm_vhost_evt *
549 tcm_vhost_allocate_evt(struct vhost_scsi *vs,
550                        u32 event, u32 reason)
551 {
552         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
553         struct tcm_vhost_evt *evt;
554
555         if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
556                 vs->vs_events_missed = true;
557                 return NULL;
558         }
559
560         evt = kzalloc(sizeof(*evt), GFP_KERNEL);
561         if (!evt) {
562                 vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
563                 vs->vs_events_missed = true;
564                 return NULL;
565         }
566
567         evt->event.event = event;
568         evt->event.reason = reason;
569         vs->vs_events_nr++;
570
571         return evt;
572 }
573
574 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
575 {
576         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
577
578         /* TODO locking against target/backend threads? */
579         transport_generic_free_cmd(se_cmd, 0);
580
581 }
582
583 static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
584 {
585         return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
586 }
587
588 static void
589 tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
590 {
591         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
592         struct virtio_scsi_event *event = &evt->event;
593         struct virtio_scsi_event __user *eventp;
594         unsigned out, in;
595         int head, ret;
596
597         if (!vq->private_data) {
598                 vs->vs_events_missed = true;
599                 return;
600         }
601
602 again:
603         vhost_disable_notify(&vs->dev, vq);
604         head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
605                         ARRAY_SIZE(vq->iov), &out, &in,
606                         NULL, NULL);
607         if (head < 0) {
608                 vs->vs_events_missed = true;
609                 return;
610         }
611         if (head == vq->num) {
612                 if (vhost_enable_notify(&vs->dev, vq))
613                         goto again;
614                 vs->vs_events_missed = true;
615                 return;
616         }
617
618         if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
619                 vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
620                                 vq->iov[out].iov_len);
621                 vs->vs_events_missed = true;
622                 return;
623         }
624
625         if (vs->vs_events_missed) {
626                 event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
627                 vs->vs_events_missed = false;
628         }
629
630         eventp = vq->iov[out].iov_base;
631         ret = __copy_to_user(eventp, event, sizeof(*event));
632         if (!ret)
633                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
634         else
635                 vq_err(vq, "Faulted on tcm_vhost_send_event\n");
636 }
637
638 static void tcm_vhost_evt_work(struct vhost_work *work)
639 {
640         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
641                                         vs_event_work);
642         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
643         struct tcm_vhost_evt *evt;
644         struct llist_node *llnode;
645
646         mutex_lock(&vq->mutex);
647         llnode = llist_del_all(&vs->vs_event_list);
648         while (llnode) {
649                 evt = llist_entry(llnode, struct tcm_vhost_evt, list);
650                 llnode = llist_next(llnode);
651                 tcm_vhost_do_evt_work(vs, evt);
652                 tcm_vhost_free_evt(vs, evt);
653         }
654         mutex_unlock(&vq->mutex);
655 }
656
657 /* Fill in status and signal that we are done processing this command
658  *
659  * This is scheduled in the vhost work queue so we are called with the owner
660  * process mm and can access the vring.
661  */
662 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
663 {
664         struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
665                                         vs_completion_work);
666         DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
667         struct virtio_scsi_cmd_resp v_rsp;
668         struct tcm_vhost_cmd *cmd;
669         struct llist_node *llnode;
670         struct se_cmd *se_cmd;
671         int ret, vq;
672
673         bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
674         llnode = llist_del_all(&vs->vs_completion_list);
675         while (llnode) {
676                 cmd = llist_entry(llnode, struct tcm_vhost_cmd,
677                                      tvc_completion_list);
678                 llnode = llist_next(llnode);
679                 se_cmd = &cmd->tvc_se_cmd;
680
681                 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
682                         cmd, se_cmd->residual_count, se_cmd->scsi_status);
683
684                 memset(&v_rsp, 0, sizeof(v_rsp));
685                 v_rsp.resid = se_cmd->residual_count;
686                 /* TODO is status_qualifier field needed? */
687                 v_rsp.status = se_cmd->scsi_status;
688                 v_rsp.sense_len = se_cmd->scsi_sense_length;
689                 memcpy(v_rsp.sense, cmd->tvc_sense_buf,
690                        v_rsp.sense_len);
691                 ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
692                 if (likely(ret == 0)) {
693                         struct vhost_scsi_virtqueue *q;
694                         vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
695                         q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
696                         vq = q - vs->vqs;
697                         __set_bit(vq, signal);
698                 } else
699                         pr_err("Faulted on virtio_scsi_cmd_resp\n");
700
701                 vhost_scsi_free_cmd(cmd);
702         }
703
704         vq = -1;
705         while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
706                 < VHOST_SCSI_MAX_VQ)
707                 vhost_signal(&vs->dev, &vs->vqs[vq].vq);
708 }
709
710 static struct tcm_vhost_cmd *
711 vhost_scsi_get_tag(struct vhost_virtqueue *vq,
712                         struct tcm_vhost_tpg *tpg,
713                         struct virtio_scsi_cmd_req *v_req,
714                         u32 exp_data_len,
715                         int data_direction)
716 {
717         struct tcm_vhost_cmd *cmd;
718         struct tcm_vhost_nexus *tv_nexus;
719         struct se_session *se_sess;
720         struct scatterlist *sg;
721         struct page **pages;
722         int tag;
723
724         tv_nexus = tpg->tpg_nexus;
725         if (!tv_nexus) {
726                 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
727                 return ERR_PTR(-EIO);
728         }
729         se_sess = tv_nexus->tvn_se_sess;
730
731         tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
732         if (tag < 0) {
733                 pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
734                 return ERR_PTR(-ENOMEM);
735         }
736
737         cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
738         sg = cmd->tvc_sgl;
739         pages = cmd->tvc_upages;
740         memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
741
742         cmd->tvc_sgl = sg;
743         cmd->tvc_upages = pages;
744         cmd->tvc_se_cmd.map_tag = tag;
745         cmd->tvc_tag = v_req->tag;
746         cmd->tvc_task_attr = v_req->task_attr;
747         cmd->tvc_exp_data_len = exp_data_len;
748         cmd->tvc_data_direction = data_direction;
749         cmd->tvc_nexus = tv_nexus;
750         cmd->inflight = tcm_vhost_get_inflight(vq);
751
752         return cmd;
753 }
754
755 /*
756  * Map a user memory range into a scatterlist
757  *
758  * Returns the number of scatterlist entries used or -errno on error.
759  */
760 static int
761 vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
762                       struct scatterlist *sgl,
763                       unsigned int sgl_count,
764                       struct iovec *iov,
765                       int write)
766 {
767         unsigned int npages = 0, pages_nr, offset, nbytes;
768         struct scatterlist *sg = sgl;
769         void __user *ptr = iov->iov_base;
770         size_t len = iov->iov_len;
771         struct page **pages;
772         int ret, i;
773
774         if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
775                 pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
776                        " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
777                         sgl_count, TCM_VHOST_PREALLOC_SGLS);
778                 return -ENOBUFS;
779         }
780
781         pages_nr = iov_num_pages(iov);
782         if (pages_nr > sgl_count)
783                 return -ENOBUFS;
784
785         if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
786                 pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
787                        " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
788                         pages_nr, TCM_VHOST_PREALLOC_PAGES);
789                 return -ENOBUFS;
790         }
791
792         pages = tv_cmd->tvc_upages;
793
794         ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
795         /* No pages were pinned */
796         if (ret < 0)
797                 goto out;
798         /* Less pages pinned than wanted */
799         if (ret != pages_nr) {
800                 for (i = 0; i < ret; i++)
801                         put_page(pages[i]);
802                 ret = -EFAULT;
803                 goto out;
804         }
805
806         while (len > 0) {
807                 offset = (uintptr_t)ptr & ~PAGE_MASK;
808                 nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
809                 sg_set_page(sg, pages[npages], nbytes, offset);
810                 ptr += nbytes;
811                 len -= nbytes;
812                 sg++;
813                 npages++;
814         }
815
816 out:
817         return ret;
818 }
819
820 static int
821 vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
822                           struct iovec *iov,
823                           unsigned int niov,
824                           int write)
825 {
826         int ret;
827         unsigned int i;
828         u32 sgl_count;
829         struct scatterlist *sg;
830
831         /*
832          * Find out how long sglist needs to be
833          */
834         sgl_count = 0;
835         for (i = 0; i < niov; i++)
836                 sgl_count += iov_num_pages(&iov[i]);
837
838         /* TODO overflow checking */
839
840         sg = cmd->tvc_sgl;
841         pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
842         sg_init_table(sg, sgl_count);
843
844         cmd->tvc_sgl_count = sgl_count;
845
846         pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
847         for (i = 0; i < niov; i++) {
848                 ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
849                                             write);
850                 if (ret < 0) {
851                         for (i = 0; i < cmd->tvc_sgl_count; i++)
852                                 put_page(sg_page(&cmd->tvc_sgl[i]));
853
854                         cmd->tvc_sgl_count = 0;
855                         return ret;
856                 }
857
858                 sg += ret;
859                 sgl_count -= ret;
860         }
861         return 0;
862 }
863
864 static void tcm_vhost_submission_work(struct work_struct *work)
865 {
866         struct tcm_vhost_cmd *cmd =
867                 container_of(work, struct tcm_vhost_cmd, work);
868         struct tcm_vhost_nexus *tv_nexus;
869         struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
870         struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
871         int rc, sg_no_bidi = 0;
872
873         if (cmd->tvc_sgl_count) {
874                 sg_ptr = cmd->tvc_sgl;
875 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
876 #if 0
877                 if (se_cmd->se_cmd_flags & SCF_BIDI) {
878                         sg_bidi_ptr = NULL;
879                         sg_no_bidi = 0;
880                 }
881 #endif
882         } else {
883                 sg_ptr = NULL;
884         }
885         tv_nexus = cmd->tvc_nexus;
886
887         rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
888                         cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
889                         cmd->tvc_lun, cmd->tvc_exp_data_len,
890                         cmd->tvc_task_attr, cmd->tvc_data_direction,
891                         TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
892                         sg_bidi_ptr, sg_no_bidi);
893         if (rc < 0) {
894                 transport_send_check_condition_and_sense(se_cmd,
895                                 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
896                 transport_generic_free_cmd(se_cmd, 0);
897         }
898 }
899
900 static void
901 vhost_scsi_send_bad_target(struct vhost_scsi *vs,
902                            struct vhost_virtqueue *vq,
903                            int head, unsigned out)
904 {
905         struct virtio_scsi_cmd_resp __user *resp;
906         struct virtio_scsi_cmd_resp rsp;
907         int ret;
908
909         memset(&rsp, 0, sizeof(rsp));
910         rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
911         resp = vq->iov[out].iov_base;
912         ret = __copy_to_user(resp, &rsp, sizeof(rsp));
913         if (!ret)
914                 vhost_add_used_and_signal(&vs->dev, vq, head, 0);
915         else
916                 pr_err("Faulted on virtio_scsi_cmd_resp\n");
917 }
918
919 static void
920 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
921 {
922         struct tcm_vhost_tpg **vs_tpg;
923         struct virtio_scsi_cmd_req v_req;
924         struct tcm_vhost_tpg *tpg;
925         struct tcm_vhost_cmd *cmd;
926         u32 exp_data_len, data_first, data_num, data_direction;
927         unsigned out, in, i;
928         int head, ret;
929         u8 target;
930
931         mutex_lock(&vq->mutex);
932         /*
933          * We can handle the vq only after the endpoint is setup by calling the
934          * VHOST_SCSI_SET_ENDPOINT ioctl.
935          */
936         vs_tpg = vq->private_data;
937         if (!vs_tpg)
938                 goto out;
939
940         vhost_disable_notify(&vs->dev, vq);
941
942         for (;;) {
943                 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
944                                         ARRAY_SIZE(vq->iov), &out, &in,
945                                         NULL, NULL);
946                 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
947                                         head, out, in);
948                 /* On error, stop handling until the next kick. */
949                 if (unlikely(head < 0))
950                         break;
951                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
952                 if (head == vq->num) {
953                         if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
954                                 vhost_disable_notify(&vs->dev, vq);
955                                 continue;
956                         }
957                         break;
958                 }
959
960 /* FIXME: BIDI operation */
961                 if (out == 1 && in == 1) {
962                         data_direction = DMA_NONE;
963                         data_first = 0;
964                         data_num = 0;
965                 } else if (out == 1 && in > 1) {
966                         data_direction = DMA_FROM_DEVICE;
967                         data_first = out + 1;
968                         data_num = in - 1;
969                 } else if (out > 1 && in == 1) {
970                         data_direction = DMA_TO_DEVICE;
971                         data_first = 1;
972                         data_num = out - 1;
973                 } else {
974                         vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
975                                         out, in);
976                         break;
977                 }
978
979                 /*
980                  * Check for a sane resp buffer so we can report errors to
981                  * the guest.
982                  */
983                 if (unlikely(vq->iov[out].iov_len !=
984                                         sizeof(struct virtio_scsi_cmd_resp))) {
985                         vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
986                                 " bytes\n", vq->iov[out].iov_len);
987                         break;
988                 }
989
990                 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
991                         vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
992                                 " bytes\n", vq->iov[0].iov_len);
993                         break;
994                 }
995                 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
996                         " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
997                 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
998                                 sizeof(v_req));
999                 if (unlikely(ret)) {
1000                         vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
1001                         break;
1002                 }
1003
1004                 /* Extract the tpgt */
1005                 target = v_req.lun[1];
1006                 tpg = ACCESS_ONCE(vs_tpg[target]);
1007
1008                 /* Target does not exist, fail the request */
1009                 if (unlikely(!tpg)) {
1010                         vhost_scsi_send_bad_target(vs, vq, head, out);
1011                         continue;
1012                 }
1013
1014                 exp_data_len = 0;
1015                 for (i = 0; i < data_num; i++)
1016                         exp_data_len += vq->iov[data_first + i].iov_len;
1017
1018                 cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
1019                                          exp_data_len, data_direction);
1020                 if (IS_ERR(cmd)) {
1021                         vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
1022                                         PTR_ERR(cmd));
1023                         goto err_cmd;
1024                 }
1025                 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
1026                         ": %d\n", cmd, exp_data_len, data_direction);
1027
1028                 cmd->tvc_vhost = vs;
1029                 cmd->tvc_vq = vq;
1030                 cmd->tvc_resp = vq->iov[out].iov_base;
1031
1032                 /*
1033                  * Copy in the recieved CDB descriptor into cmd->tvc_cdb
1034                  * that will be used by tcm_vhost_new_cmd_map() and down into
1035                  * target_setup_cmd_from_cdb()
1036                  */
1037                 memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
1038                 /*
1039                  * Check that the recieved CDB size does not exceeded our
1040                  * hardcoded max for tcm_vhost
1041                  */
1042                 /* TODO what if cdb was too small for varlen cdb header? */
1043                 if (unlikely(scsi_command_size(cmd->tvc_cdb) >
1044                                         TCM_VHOST_MAX_CDB_SIZE)) {
1045                         vq_err(vq, "Received SCSI CDB with command_size: %d that"
1046                                 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1047                                 scsi_command_size(cmd->tvc_cdb),
1048                                 TCM_VHOST_MAX_CDB_SIZE);
1049                         goto err_free;
1050                 }
1051                 cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
1052
1053                 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
1054                         cmd->tvc_cdb[0], cmd->tvc_lun);
1055
1056                 if (data_direction != DMA_NONE) {
1057                         ret = vhost_scsi_map_iov_to_sgl(cmd,
1058                                         &vq->iov[data_first], data_num,
1059                                         data_direction == DMA_FROM_DEVICE);
1060                         if (unlikely(ret)) {
1061                                 vq_err(vq, "Failed to map iov to sgl\n");
1062                                 goto err_free;
1063                         }
1064                 }
1065
1066                 /*
1067                  * Save the descriptor from vhost_get_vq_desc() to be used to
1068                  * complete the virtio-scsi request in TCM callback context via
1069                  * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
1070                  */
1071                 cmd->tvc_vq_desc = head;
1072                 /*
1073                  * Dispatch tv_cmd descriptor for cmwq execution in process
1074                  * context provided by tcm_vhost_workqueue.  This also ensures
1075                  * tv_cmd is executed on the same kworker CPU as this vhost
1076                  * thread to gain positive L2 cache locality effects..
1077                  */
1078                 INIT_WORK(&cmd->work, tcm_vhost_submission_work);
1079                 queue_work(tcm_vhost_workqueue, &cmd->work);
1080         }
1081
1082         mutex_unlock(&vq->mutex);
1083         return;
1084
1085 err_free:
1086         vhost_scsi_free_cmd(cmd);
1087 err_cmd:
1088         vhost_scsi_send_bad_target(vs, vq, head, out);
1089 out:
1090         mutex_unlock(&vq->mutex);
1091 }
1092
1093 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
1094 {
1095         pr_debug("%s: The handling func for control queue.\n", __func__);
1096 }
1097
1098 static void
1099 tcm_vhost_send_evt(struct vhost_scsi *vs,
1100                    struct tcm_vhost_tpg *tpg,
1101                    struct se_lun *lun,
1102                    u32 event,
1103                    u32 reason)
1104 {
1105         struct tcm_vhost_evt *evt;
1106
1107         evt = tcm_vhost_allocate_evt(vs, event, reason);
1108         if (!evt)
1109                 return;
1110
1111         if (tpg && lun) {
1112                 /* TODO: share lun setup code with virtio-scsi.ko */
1113                 /*
1114                  * Note: evt->event is zeroed when we allocate it and
1115                  * lun[4-7] need to be zero according to virtio-scsi spec.
1116                  */
1117                 evt->event.lun[0] = 0x01;
1118                 evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
1119                 if (lun->unpacked_lun >= 256)
1120                         evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
1121                 evt->event.lun[3] = lun->unpacked_lun & 0xFF;
1122         }
1123
1124         llist_add(&evt->list, &vs->vs_event_list);
1125         vhost_work_queue(&vs->dev, &vs->vs_event_work);
1126 }
1127
1128 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
1129 {
1130         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1131                                                 poll.work);
1132         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1133
1134         mutex_lock(&vq->mutex);
1135         if (!vq->private_data)
1136                 goto out;
1137
1138         if (vs->vs_events_missed)
1139                 tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
1140 out:
1141         mutex_unlock(&vq->mutex);
1142 }
1143
1144 static void vhost_scsi_handle_kick(struct vhost_work *work)
1145 {
1146         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1147                                                 poll.work);
1148         struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
1149
1150         vhost_scsi_handle_vq(vs, vq);
1151 }
1152
1153 static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
1154 {
1155         vhost_poll_flush(&vs->vqs[index].vq.poll);
1156 }
1157
1158 /* Callers must hold dev mutex */
1159 static void vhost_scsi_flush(struct vhost_scsi *vs)
1160 {
1161         struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
1162         int i;
1163
1164         /* Init new inflight and remember the old inflight */
1165         tcm_vhost_init_inflight(vs, old_inflight);
1166
1167         /*
1168          * The inflight->kref was initialized to 1. We decrement it here to
1169          * indicate the start of the flush operation so that it will reach 0
1170          * when all the reqs are finished.
1171          */
1172         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1173                 kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
1174
1175         /* Flush both the vhost poll and vhost work */
1176         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1177                 vhost_scsi_flush_vq(vs, i);
1178         vhost_work_flush(&vs->dev, &vs->vs_completion_work);
1179         vhost_work_flush(&vs->dev, &vs->vs_event_work);
1180
1181         /* Wait for all reqs issued before the flush to be finished */
1182         for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
1183                 wait_for_completion(&old_inflight[i]->comp);
1184 }
1185
1186 /*
1187  * Called from vhost_scsi_ioctl() context to walk the list of available
1188  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
1189  *
1190  *  The lock nesting rule is:
1191  *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
1192  */
1193 static int
1194 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1195                         struct vhost_scsi_target *t)
1196 {
1197         struct tcm_vhost_tport *tv_tport;
1198         struct tcm_vhost_tpg *tpg;
1199         struct tcm_vhost_tpg **vs_tpg;
1200         struct vhost_virtqueue *vq;
1201         int index, ret, i, len;
1202         bool match = false;
1203
1204         mutex_lock(&tcm_vhost_mutex);
1205         mutex_lock(&vs->dev.mutex);
1206
1207         /* Verify that ring has been setup correctly. */
1208         for (index = 0; index < vs->dev.nvqs; ++index) {
1209                 /* Verify that ring has been setup correctly. */
1210                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1211                         ret = -EFAULT;
1212                         goto out;
1213                 }
1214         }
1215
1216         len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
1217         vs_tpg = kzalloc(len, GFP_KERNEL);
1218         if (!vs_tpg) {
1219                 ret = -ENOMEM;
1220                 goto out;
1221         }
1222         if (vs->vs_tpg)
1223                 memcpy(vs_tpg, vs->vs_tpg, len);
1224
1225         list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
1226                 mutex_lock(&tpg->tv_tpg_mutex);
1227                 if (!tpg->tpg_nexus) {
1228                         mutex_unlock(&tpg->tv_tpg_mutex);
1229                         continue;
1230                 }
1231                 if (tpg->tv_tpg_vhost_count != 0) {
1232                         mutex_unlock(&tpg->tv_tpg_mutex);
1233                         continue;
1234                 }
1235                 tv_tport = tpg->tport;
1236
1237                 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1238                         if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1239                                 kfree(vs_tpg);
1240                                 mutex_unlock(&tpg->tv_tpg_mutex);
1241                                 ret = -EEXIST;
1242                                 goto out;
1243                         }
1244                         tpg->tv_tpg_vhost_count++;
1245                         tpg->vhost_scsi = vs;
1246                         vs_tpg[tpg->tport_tpgt] = tpg;
1247                         smp_mb__after_atomic_inc();
1248                         match = true;
1249                 }
1250                 mutex_unlock(&tpg->tv_tpg_mutex);
1251         }
1252
1253         if (match) {
1254                 memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
1255                        sizeof(vs->vs_vhost_wwpn));
1256                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1257                         vq = &vs->vqs[i].vq;
1258                         mutex_lock(&vq->mutex);
1259                         vq->private_data = vs_tpg;
1260                         vhost_init_used(vq);
1261                         mutex_unlock(&vq->mutex);
1262                 }
1263                 ret = 0;
1264         } else {
1265                 ret = -EEXIST;
1266         }
1267
1268         /*
1269          * Act as synchronize_rcu to make sure access to
1270          * old vs->vs_tpg is finished.
1271          */
1272         vhost_scsi_flush(vs);
1273         kfree(vs->vs_tpg);
1274         vs->vs_tpg = vs_tpg;
1275
1276 out:
1277         mutex_unlock(&vs->dev.mutex);
1278         mutex_unlock(&tcm_vhost_mutex);
1279         return ret;
1280 }
1281
1282 static int
1283 vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1284                           struct vhost_scsi_target *t)
1285 {
1286         struct tcm_vhost_tport *tv_tport;
1287         struct tcm_vhost_tpg *tpg;
1288         struct vhost_virtqueue *vq;
1289         bool match = false;
1290         int index, ret, i;
1291         u8 target;
1292
1293         mutex_lock(&tcm_vhost_mutex);
1294         mutex_lock(&vs->dev.mutex);
1295         /* Verify that ring has been setup correctly. */
1296         for (index = 0; index < vs->dev.nvqs; ++index) {
1297                 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
1298                         ret = -EFAULT;
1299                         goto err_dev;
1300                 }
1301         }
1302
1303         if (!vs->vs_tpg) {
1304                 ret = 0;
1305                 goto err_dev;
1306         }
1307
1308         for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
1309                 target = i;
1310                 tpg = vs->vs_tpg[target];
1311                 if (!tpg)
1312                         continue;
1313
1314                 mutex_lock(&tpg->tv_tpg_mutex);
1315                 tv_tport = tpg->tport;
1316                 if (!tv_tport) {
1317                         ret = -ENODEV;
1318                         goto err_tpg;
1319                 }
1320
1321                 if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
1322                         pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1323                                 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
1324                                 tv_tport->tport_name, tpg->tport_tpgt,
1325                                 t->vhost_wwpn, t->vhost_tpgt);
1326                         ret = -EINVAL;
1327                         goto err_tpg;
1328                 }
1329                 tpg->tv_tpg_vhost_count--;
1330                 tpg->vhost_scsi = NULL;
1331                 vs->vs_tpg[target] = NULL;
1332                 match = true;
1333                 mutex_unlock(&tpg->tv_tpg_mutex);
1334         }
1335         if (match) {
1336                 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
1337                         vq = &vs->vqs[i].vq;
1338                         mutex_lock(&vq->mutex);
1339                         vq->private_data = NULL;
1340                         mutex_unlock(&vq->mutex);
1341                 }
1342         }
1343         /*
1344          * Act as synchronize_rcu to make sure access to
1345          * old vs->vs_tpg is finished.
1346          */
1347         vhost_scsi_flush(vs);
1348         kfree(vs->vs_tpg);
1349         vs->vs_tpg = NULL;
1350         WARN_ON(vs->vs_events_nr);
1351         mutex_unlock(&vs->dev.mutex);
1352         mutex_unlock(&tcm_vhost_mutex);
1353         return 0;
1354
1355 err_tpg:
1356         mutex_unlock(&tpg->tv_tpg_mutex);
1357 err_dev:
1358         mutex_unlock(&vs->dev.mutex);
1359         mutex_unlock(&tcm_vhost_mutex);
1360         return ret;
1361 }
1362
1363 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1364 {
1365         if (features & ~VHOST_SCSI_FEATURES)
1366                 return -EOPNOTSUPP;
1367
1368         mutex_lock(&vs->dev.mutex);
1369         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1370             !vhost_log_access_ok(&vs->dev)) {
1371                 mutex_unlock(&vs->dev.mutex);
1372                 return -EFAULT;
1373         }
1374         vs->dev.acked_features = features;
1375         smp_wmb();
1376         vhost_scsi_flush(vs);
1377         mutex_unlock(&vs->dev.mutex);
1378         return 0;
1379 }
1380
1381 static void vhost_scsi_free(struct vhost_scsi *vs)
1382 {
1383         if (is_vmalloc_addr(vs))
1384                 vfree(vs);
1385         else
1386                 kfree(vs);
1387 }
1388
1389 static int vhost_scsi_open(struct inode *inode, struct file *f)
1390 {
1391         struct vhost_scsi *vs;
1392         struct vhost_virtqueue **vqs;
1393         int r = -ENOMEM, i;
1394
1395         vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
1396         if (!vs) {
1397                 vs = vzalloc(sizeof(*vs));
1398                 if (!vs)
1399                         goto err_vs;
1400         }
1401
1402         vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
1403         if (!vqs)
1404                 goto err_vqs;
1405
1406         vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
1407         vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
1408
1409         vs->vs_events_nr = 0;
1410         vs->vs_events_missed = false;
1411
1412         vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
1413         vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1414         vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
1415         vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
1416         for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
1417                 vqs[i] = &vs->vqs[i].vq;
1418                 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1419         }
1420         vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
1421
1422         tcm_vhost_init_inflight(vs, NULL);
1423
1424         f->private_data = vs;
1425         return 0;
1426
1427 err_vqs:
1428         vhost_scsi_free(vs);
1429 err_vs:
1430         return r;
1431 }
1432
1433 static int vhost_scsi_release(struct inode *inode, struct file *f)
1434 {
1435         struct vhost_scsi *vs = f->private_data;
1436         struct vhost_scsi_target t;
1437
1438         mutex_lock(&vs->dev.mutex);
1439         memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
1440         mutex_unlock(&vs->dev.mutex);
1441         vhost_scsi_clear_endpoint(vs, &t);
1442         vhost_dev_stop(&vs->dev);
1443         vhost_dev_cleanup(&vs->dev, false);
1444         /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1445         vhost_scsi_flush(vs);
1446         kfree(vs->dev.vqs);
1447         vhost_scsi_free(vs);
1448         return 0;
1449 }
1450
1451 static long
1452 vhost_scsi_ioctl(struct file *f,
1453                  unsigned int ioctl,
1454                  unsigned long arg)
1455 {
1456         struct vhost_scsi *vs = f->private_data;
1457         struct vhost_scsi_target backend;
1458         void __user *argp = (void __user *)arg;
1459         u64 __user *featurep = argp;
1460         u32 __user *eventsp = argp;
1461         u32 events_missed;
1462         u64 features;
1463         int r, abi_version = VHOST_SCSI_ABI_VERSION;
1464         struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1465
1466         switch (ioctl) {
1467         case VHOST_SCSI_SET_ENDPOINT:
1468                 if (copy_from_user(&backend, argp, sizeof backend))
1469                         return -EFAULT;
1470                 if (backend.reserved != 0)
1471                         return -EOPNOTSUPP;
1472
1473                 return vhost_scsi_set_endpoint(vs, &backend);
1474         case VHOST_SCSI_CLEAR_ENDPOINT:
1475                 if (copy_from_user(&backend, argp, sizeof backend))
1476                         return -EFAULT;
1477                 if (backend.reserved != 0)
1478                         return -EOPNOTSUPP;
1479
1480                 return vhost_scsi_clear_endpoint(vs, &backend);
1481         case VHOST_SCSI_GET_ABI_VERSION:
1482                 if (copy_to_user(argp, &abi_version, sizeof abi_version))
1483                         return -EFAULT;
1484                 return 0;
1485         case VHOST_SCSI_SET_EVENTS_MISSED:
1486                 if (get_user(events_missed, eventsp))
1487                         return -EFAULT;
1488                 mutex_lock(&vq->mutex);
1489                 vs->vs_events_missed = events_missed;
1490                 mutex_unlock(&vq->mutex);
1491                 return 0;
1492         case VHOST_SCSI_GET_EVENTS_MISSED:
1493                 mutex_lock(&vq->mutex);
1494                 events_missed = vs->vs_events_missed;
1495                 mutex_unlock(&vq->mutex);
1496                 if (put_user(events_missed, eventsp))
1497                         return -EFAULT;
1498                 return 0;
1499         case VHOST_GET_FEATURES:
1500                 features = VHOST_SCSI_FEATURES;
1501                 if (copy_to_user(featurep, &features, sizeof features))
1502                         return -EFAULT;
1503                 return 0;
1504         case VHOST_SET_FEATURES:
1505                 if (copy_from_user(&features, featurep, sizeof features))
1506                         return -EFAULT;
1507                 return vhost_scsi_set_features(vs, features);
1508         default:
1509                 mutex_lock(&vs->dev.mutex);
1510                 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
1511                 /* TODO: flush backend after dev ioctl. */
1512                 if (r == -ENOIOCTLCMD)
1513                         r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
1514                 mutex_unlock(&vs->dev.mutex);
1515                 return r;
1516         }
1517 }
1518
1519 #ifdef CONFIG_COMPAT
1520 static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
1521                                 unsigned long arg)
1522 {
1523         return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1524 }
1525 #endif
1526
1527 static const struct file_operations vhost_scsi_fops = {
1528         .owner          = THIS_MODULE,
1529         .release        = vhost_scsi_release,
1530         .unlocked_ioctl = vhost_scsi_ioctl,
1531 #ifdef CONFIG_COMPAT
1532         .compat_ioctl   = vhost_scsi_compat_ioctl,
1533 #endif
1534         .open           = vhost_scsi_open,
1535         .llseek         = noop_llseek,
1536 };
1537
1538 static struct miscdevice vhost_scsi_misc = {
1539         MISC_DYNAMIC_MINOR,
1540         "vhost-scsi",
1541         &vhost_scsi_fops,
1542 };
1543
1544 static int __init vhost_scsi_register(void)
1545 {
1546         return misc_register(&vhost_scsi_misc);
1547 }
1548
1549 static int vhost_scsi_deregister(void)
1550 {
1551         return misc_deregister(&vhost_scsi_misc);
1552 }
1553
1554 static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1555 {
1556         switch (tport->tport_proto_id) {
1557         case SCSI_PROTOCOL_SAS:
1558                 return "SAS";
1559         case SCSI_PROTOCOL_FCP:
1560                 return "FCP";
1561         case SCSI_PROTOCOL_ISCSI:
1562                 return "iSCSI";
1563         default:
1564                 break;
1565         }
1566
1567         return "Unknown";
1568 }
1569
1570 static void
1571 tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
1572                   struct se_lun *lun, bool plug)
1573 {
1574
1575         struct vhost_scsi *vs = tpg->vhost_scsi;
1576         struct vhost_virtqueue *vq;
1577         u32 reason;
1578
1579         if (!vs)
1580                 return;
1581
1582         mutex_lock(&vs->dev.mutex);
1583         if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
1584                 mutex_unlock(&vs->dev.mutex);
1585                 return;
1586         }
1587
1588         if (plug)
1589                 reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
1590         else
1591                 reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
1592
1593         vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
1594         mutex_lock(&vq->mutex);
1595         tcm_vhost_send_evt(vs, tpg, lun,
1596                         VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
1597         mutex_unlock(&vq->mutex);
1598         mutex_unlock(&vs->dev.mutex);
1599 }
1600
1601 static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1602 {
1603         tcm_vhost_do_plug(tpg, lun, true);
1604 }
1605
1606 static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
1607 {
1608         tcm_vhost_do_plug(tpg, lun, false);
1609 }
1610
1611 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
1612                                struct se_lun *lun)
1613 {
1614         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1615                                 struct tcm_vhost_tpg, se_tpg);
1616
1617         mutex_lock(&tcm_vhost_mutex);
1618
1619         mutex_lock(&tpg->tv_tpg_mutex);
1620         tpg->tv_tpg_port_count++;
1621         mutex_unlock(&tpg->tv_tpg_mutex);
1622
1623         tcm_vhost_hotplug(tpg, lun);
1624
1625         mutex_unlock(&tcm_vhost_mutex);
1626
1627         return 0;
1628 }
1629
1630 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
1631                                   struct se_lun *lun)
1632 {
1633         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1634                                 struct tcm_vhost_tpg, se_tpg);
1635
1636         mutex_lock(&tcm_vhost_mutex);
1637
1638         mutex_lock(&tpg->tv_tpg_mutex);
1639         tpg->tv_tpg_port_count--;
1640         mutex_unlock(&tpg->tv_tpg_mutex);
1641
1642         tcm_vhost_hotunplug(tpg, lun);
1643
1644         mutex_unlock(&tcm_vhost_mutex);
1645 }
1646
1647 static struct se_node_acl *
1648 tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
1649                        struct config_group *group,
1650                        const char *name)
1651 {
1652         struct se_node_acl *se_nacl, *se_nacl_new;
1653         struct tcm_vhost_nacl *nacl;
1654         u64 wwpn = 0;
1655         u32 nexus_depth;
1656
1657         /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1658                 return ERR_PTR(-EINVAL); */
1659         se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1660         if (!se_nacl_new)
1661                 return ERR_PTR(-ENOMEM);
1662
1663         nexus_depth = 1;
1664         /*
1665          * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1666          * when converting a NodeACL from demo mode -> explict
1667          */
1668         se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1669                                 name, nexus_depth);
1670         if (IS_ERR(se_nacl)) {
1671                 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1672                 return se_nacl;
1673         }
1674         /*
1675          * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1676          */
1677         nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1678         nacl->iport_wwpn = wwpn;
1679
1680         return se_nacl;
1681 }
1682
1683 static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1684 {
1685         struct tcm_vhost_nacl *nacl = container_of(se_acl,
1686                                 struct tcm_vhost_nacl, se_node_acl);
1687         core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1688         kfree(nacl);
1689 }
1690
1691 static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
1692                                        struct se_session *se_sess)
1693 {
1694         struct tcm_vhost_cmd *tv_cmd;
1695         unsigned int i;
1696
1697         if (!se_sess->sess_cmd_map)
1698                 return;
1699
1700         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1701                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1702
1703                 kfree(tv_cmd->tvc_sgl);
1704                 kfree(tv_cmd->tvc_upages);
1705         }
1706 }
1707
1708 static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
1709                                 const char *name)
1710 {
1711         struct se_portal_group *se_tpg;
1712         struct se_session *se_sess;
1713         struct tcm_vhost_nexus *tv_nexus;
1714         struct tcm_vhost_cmd *tv_cmd;
1715         unsigned int i;
1716
1717         mutex_lock(&tpg->tv_tpg_mutex);
1718         if (tpg->tpg_nexus) {
1719                 mutex_unlock(&tpg->tv_tpg_mutex);
1720                 pr_debug("tpg->tpg_nexus already exists\n");
1721                 return -EEXIST;
1722         }
1723         se_tpg = &tpg->se_tpg;
1724
1725         tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1726         if (!tv_nexus) {
1727                 mutex_unlock(&tpg->tv_tpg_mutex);
1728                 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1729                 return -ENOMEM;
1730         }
1731         /*
1732          *  Initialize the struct se_session pointer and setup tagpool
1733          *  for struct tcm_vhost_cmd descriptors
1734          */
1735         tv_nexus->tvn_se_sess = transport_init_session_tags(
1736                                         TCM_VHOST_DEFAULT_TAGS,
1737                                         sizeof(struct tcm_vhost_cmd));
1738         if (IS_ERR(tv_nexus->tvn_se_sess)) {
1739                 mutex_unlock(&tpg->tv_tpg_mutex);
1740                 kfree(tv_nexus);
1741                 return -ENOMEM;
1742         }
1743         se_sess = tv_nexus->tvn_se_sess;
1744         for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
1745                 tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
1746
1747                 tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
1748                                         TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
1749                 if (!tv_cmd->tvc_sgl) {
1750                         mutex_unlock(&tpg->tv_tpg_mutex);
1751                         pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
1752                         goto out;
1753                 }
1754
1755                 tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
1756                                         TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
1757                 if (!tv_cmd->tvc_upages) {
1758                         mutex_unlock(&tpg->tv_tpg_mutex);
1759                         pr_err("Unable to allocate tv_cmd->tvc_upages\n");
1760                         goto out;
1761                 }
1762         }
1763         /*
1764          * Since we are running in 'demo mode' this call with generate a
1765          * struct se_node_acl for the tcm_vhost struct se_portal_group with
1766          * the SCSI Initiator port name of the passed configfs group 'name'.
1767          */
1768         tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1769                                 se_tpg, (unsigned char *)name);
1770         if (!tv_nexus->tvn_se_sess->se_node_acl) {
1771                 mutex_unlock(&tpg->tv_tpg_mutex);
1772                 pr_debug("core_tpg_check_initiator_node_acl() failed"
1773                                 " for %s\n", name);
1774                 goto out;
1775         }
1776         /*
1777          * Now register the TCM vhost virtual I_T Nexus as active with the
1778          * call to __transport_register_session()
1779          */
1780         __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1781                         tv_nexus->tvn_se_sess, tv_nexus);
1782         tpg->tpg_nexus = tv_nexus;
1783
1784         mutex_unlock(&tpg->tv_tpg_mutex);
1785         return 0;
1786
1787 out:
1788         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1789         transport_free_session(se_sess);
1790         kfree(tv_nexus);
1791         return -ENOMEM;
1792 }
1793
1794 static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
1795 {
1796         struct se_session *se_sess;
1797         struct tcm_vhost_nexus *tv_nexus;
1798
1799         mutex_lock(&tpg->tv_tpg_mutex);
1800         tv_nexus = tpg->tpg_nexus;
1801         if (!tv_nexus) {
1802                 mutex_unlock(&tpg->tv_tpg_mutex);
1803                 return -ENODEV;
1804         }
1805
1806         se_sess = tv_nexus->tvn_se_sess;
1807         if (!se_sess) {
1808                 mutex_unlock(&tpg->tv_tpg_mutex);
1809                 return -ENODEV;
1810         }
1811
1812         if (tpg->tv_tpg_port_count != 0) {
1813                 mutex_unlock(&tpg->tv_tpg_mutex);
1814                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1815                         " active TPG port count: %d\n",
1816                         tpg->tv_tpg_port_count);
1817                 return -EBUSY;
1818         }
1819
1820         if (tpg->tv_tpg_vhost_count != 0) {
1821                 mutex_unlock(&tpg->tv_tpg_mutex);
1822                 pr_err("Unable to remove TCM_vhost I_T Nexus with"
1823                         " active TPG vhost count: %d\n",
1824                         tpg->tv_tpg_vhost_count);
1825                 return -EBUSY;
1826         }
1827
1828         pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
1829                 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1830                 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1831
1832         tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
1833         /*
1834          * Release the SCSI I_T Nexus to the emulated vhost Target Port
1835          */
1836         transport_deregister_session(tv_nexus->tvn_se_sess);
1837         tpg->tpg_nexus = NULL;
1838         mutex_unlock(&tpg->tv_tpg_mutex);
1839
1840         kfree(tv_nexus);
1841         return 0;
1842 }
1843
1844 static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
1845                                         char *page)
1846 {
1847         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1848                                 struct tcm_vhost_tpg, se_tpg);
1849         struct tcm_vhost_nexus *tv_nexus;
1850         ssize_t ret;
1851
1852         mutex_lock(&tpg->tv_tpg_mutex);
1853         tv_nexus = tpg->tpg_nexus;
1854         if (!tv_nexus) {
1855                 mutex_unlock(&tpg->tv_tpg_mutex);
1856                 return -ENODEV;
1857         }
1858         ret = snprintf(page, PAGE_SIZE, "%s\n",
1859                         tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1860         mutex_unlock(&tpg->tv_tpg_mutex);
1861
1862         return ret;
1863 }
1864
1865 static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
1866                                          const char *page,
1867                                          size_t count)
1868 {
1869         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1870                                 struct tcm_vhost_tpg, se_tpg);
1871         struct tcm_vhost_tport *tport_wwn = tpg->tport;
1872         unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1873         int ret;
1874         /*
1875          * Shutdown the active I_T nexus if 'NULL' is passed..
1876          */
1877         if (!strncmp(page, "NULL", 4)) {
1878                 ret = tcm_vhost_drop_nexus(tpg);
1879                 return (!ret) ? count : ret;
1880         }
1881         /*
1882          * Otherwise make sure the passed virtual Initiator port WWN matches
1883          * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1884          * tcm_vhost_make_nexus().
1885          */
1886         if (strlen(page) >= TCM_VHOST_NAMELEN) {
1887                 pr_err("Emulated NAA Sas Address: %s, exceeds"
1888                                 " max: %d\n", page, TCM_VHOST_NAMELEN);
1889                 return -EINVAL;
1890         }
1891         snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1892
1893         ptr = strstr(i_port, "naa.");
1894         if (ptr) {
1895                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1896                         pr_err("Passed SAS Initiator Port %s does not"
1897                                 " match target port protoid: %s\n", i_port,
1898                                 tcm_vhost_dump_proto_id(tport_wwn));
1899                         return -EINVAL;
1900                 }
1901                 port_ptr = &i_port[0];
1902                 goto check_newline;
1903         }
1904         ptr = strstr(i_port, "fc.");
1905         if (ptr) {
1906                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1907                         pr_err("Passed FCP Initiator Port %s does not"
1908                                 " match target port protoid: %s\n", i_port,
1909                                 tcm_vhost_dump_proto_id(tport_wwn));
1910                         return -EINVAL;
1911                 }
1912                 port_ptr = &i_port[3]; /* Skip over "fc." */
1913                 goto check_newline;
1914         }
1915         ptr = strstr(i_port, "iqn.");
1916         if (ptr) {
1917                 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1918                         pr_err("Passed iSCSI Initiator Port %s does not"
1919                                 " match target port protoid: %s\n", i_port,
1920                                 tcm_vhost_dump_proto_id(tport_wwn));
1921                         return -EINVAL;
1922                 }
1923                 port_ptr = &i_port[0];
1924                 goto check_newline;
1925         }
1926         pr_err("Unable to locate prefix for emulated Initiator Port:"
1927                         " %s\n", i_port);
1928         return -EINVAL;
1929         /*
1930          * Clear any trailing newline for the NAA WWN
1931          */
1932 check_newline:
1933         if (i_port[strlen(i_port)-1] == '\n')
1934                 i_port[strlen(i_port)-1] = '\0';
1935
1936         ret = tcm_vhost_make_nexus(tpg, port_ptr);
1937         if (ret < 0)
1938                 return ret;
1939
1940         return count;
1941 }
1942
1943 TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1944
1945 static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1946         &tcm_vhost_tpg_nexus.attr,
1947         NULL,
1948 };
1949
1950 static struct se_portal_group *
1951 tcm_vhost_make_tpg(struct se_wwn *wwn,
1952                    struct config_group *group,
1953                    const char *name)
1954 {
1955         struct tcm_vhost_tport *tport = container_of(wwn,
1956                         struct tcm_vhost_tport, tport_wwn);
1957
1958         struct tcm_vhost_tpg *tpg;
1959         unsigned long tpgt;
1960         int ret;
1961
1962         if (strstr(name, "tpgt_") != name)
1963                 return ERR_PTR(-EINVAL);
1964         if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1965                 return ERR_PTR(-EINVAL);
1966
1967         tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1968         if (!tpg) {
1969                 pr_err("Unable to allocate struct tcm_vhost_tpg");
1970                 return ERR_PTR(-ENOMEM);
1971         }
1972         mutex_init(&tpg->tv_tpg_mutex);
1973         INIT_LIST_HEAD(&tpg->tv_tpg_list);
1974         tpg->tport = tport;
1975         tpg->tport_tpgt = tpgt;
1976
1977         ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1978                                 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1979         if (ret < 0) {
1980                 kfree(tpg);
1981                 return NULL;
1982         }
1983         mutex_lock(&tcm_vhost_mutex);
1984         list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1985         mutex_unlock(&tcm_vhost_mutex);
1986
1987         return &tpg->se_tpg;
1988 }
1989
1990 static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1991 {
1992         struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1993                                 struct tcm_vhost_tpg, se_tpg);
1994
1995         mutex_lock(&tcm_vhost_mutex);
1996         list_del(&tpg->tv_tpg_list);
1997         mutex_unlock(&tcm_vhost_mutex);
1998         /*
1999          * Release the virtual I_T Nexus for this vhost TPG
2000          */
2001         tcm_vhost_drop_nexus(tpg);
2002         /*
2003          * Deregister the se_tpg from TCM..
2004          */
2005         core_tpg_deregister(se_tpg);
2006         kfree(tpg);
2007 }
2008
2009 static struct se_wwn *
2010 tcm_vhost_make_tport(struct target_fabric_configfs *tf,
2011                      struct config_group *group,
2012                      const char *name)
2013 {
2014         struct tcm_vhost_tport *tport;
2015         char *ptr;
2016         u64 wwpn = 0;
2017         int off = 0;
2018
2019         /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
2020                 return ERR_PTR(-EINVAL); */
2021
2022         tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
2023         if (!tport) {
2024                 pr_err("Unable to allocate struct tcm_vhost_tport");
2025                 return ERR_PTR(-ENOMEM);
2026         }
2027         tport->tport_wwpn = wwpn;
2028         /*
2029          * Determine the emulated Protocol Identifier and Target Port Name
2030          * based on the incoming configfs directory name.
2031          */
2032         ptr = strstr(name, "naa.");
2033         if (ptr) {
2034                 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
2035                 goto check_len;
2036         }
2037         ptr = strstr(name, "fc.");
2038         if (ptr) {
2039                 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
2040                 off = 3; /* Skip over "fc." */
2041                 goto check_len;
2042         }
2043         ptr = strstr(name, "iqn.");
2044         if (ptr) {
2045                 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
2046                 goto check_len;
2047         }
2048
2049         pr_err("Unable to locate prefix for emulated Target Port:"
2050                         " %s\n", name);
2051         kfree(tport);
2052         return ERR_PTR(-EINVAL);
2053
2054 check_len:
2055         if (strlen(name) >= TCM_VHOST_NAMELEN) {
2056                 pr_err("Emulated %s Address: %s, exceeds"
2057                         " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
2058                         TCM_VHOST_NAMELEN);
2059                 kfree(tport);
2060                 return ERR_PTR(-EINVAL);
2061         }
2062         snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
2063
2064         pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
2065                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
2066
2067         return &tport->tport_wwn;
2068 }
2069
2070 static void tcm_vhost_drop_tport(struct se_wwn *wwn)
2071 {
2072         struct tcm_vhost_tport *tport = container_of(wwn,
2073                                 struct tcm_vhost_tport, tport_wwn);
2074
2075         pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
2076                 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
2077                 tport->tport_name);
2078
2079         kfree(tport);
2080 }
2081
2082 static ssize_t
2083 tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
2084                                 char *page)
2085 {
2086         return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
2087                 "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2088                 utsname()->machine);
2089 }
2090
2091 TF_WWN_ATTR_RO(tcm_vhost, version);
2092
2093 static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
2094         &tcm_vhost_wwn_version.attr,
2095         NULL,
2096 };
2097
2098 static struct target_core_fabric_ops tcm_vhost_ops = {
2099         .get_fabric_name                = tcm_vhost_get_fabric_name,
2100         .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
2101         .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
2102         .tpg_get_tag                    = tcm_vhost_get_tag,
2103         .tpg_get_default_depth          = tcm_vhost_get_default_depth,
2104         .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
2105         .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
2106         .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
2107         .tpg_check_demo_mode            = tcm_vhost_check_true,
2108         .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
2109         .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
2110         .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
2111         .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
2112         .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
2113         .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
2114         .release_cmd                    = tcm_vhost_release_cmd,
2115         .check_stop_free                = vhost_scsi_check_stop_free,
2116         .shutdown_session               = tcm_vhost_shutdown_session,
2117         .close_session                  = tcm_vhost_close_session,
2118         .sess_get_index                 = tcm_vhost_sess_get_index,
2119         .sess_get_initiator_sid         = NULL,
2120         .write_pending                  = tcm_vhost_write_pending,
2121         .write_pending_status           = tcm_vhost_write_pending_status,
2122         .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
2123         .get_task_tag                   = tcm_vhost_get_task_tag,
2124         .get_cmd_state                  = tcm_vhost_get_cmd_state,
2125         .queue_data_in                  = tcm_vhost_queue_data_in,
2126         .queue_status                   = tcm_vhost_queue_status,
2127         .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
2128         /*
2129          * Setup callers for generic logic in target_core_fabric_configfs.c
2130          */
2131         .fabric_make_wwn                = tcm_vhost_make_tport,
2132         .fabric_drop_wwn                = tcm_vhost_drop_tport,
2133         .fabric_make_tpg                = tcm_vhost_make_tpg,
2134         .fabric_drop_tpg                = tcm_vhost_drop_tpg,
2135         .fabric_post_link               = tcm_vhost_port_link,
2136         .fabric_pre_unlink              = tcm_vhost_port_unlink,
2137         .fabric_make_np                 = NULL,
2138         .fabric_drop_np                 = NULL,
2139         .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
2140         .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
2141 };
2142
2143 static int tcm_vhost_register_configfs(void)
2144 {
2145         struct target_fabric_configfs *fabric;
2146         int ret;
2147
2148         pr_debug("TCM_VHOST fabric module %s on %s/%s"
2149                 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
2150                 utsname()->machine);
2151         /*
2152          * Register the top level struct config_item_type with TCM core
2153          */
2154         fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
2155         if (IS_ERR(fabric)) {
2156                 pr_err("target_fabric_configfs_init() failed\n");
2157                 return PTR_ERR(fabric);
2158         }
2159         /*
2160          * Setup fabric->tf_ops from our local tcm_vhost_ops
2161          */
2162         fabric->tf_ops = tcm_vhost_ops;
2163         /*
2164          * Setup default attribute lists for various fabric->tf_cit_tmpl
2165          */
2166         fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
2167         fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
2168         fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
2169         fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
2170         fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
2171         fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
2172         fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
2173         fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
2174         fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
2175         /*
2176          * Register the fabric for use within TCM
2177          */
2178         ret = target_fabric_configfs_register(fabric);
2179         if (ret < 0) {
2180                 pr_err("target_fabric_configfs_register() failed"
2181                                 " for TCM_VHOST\n");
2182                 return ret;
2183         }
2184         /*
2185          * Setup our local pointer to *fabric
2186          */
2187         tcm_vhost_fabric_configfs = fabric;
2188         pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
2189         return 0;
2190 };
2191
2192 static void tcm_vhost_deregister_configfs(void)
2193 {
2194         if (!tcm_vhost_fabric_configfs)
2195                 return;
2196
2197         target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
2198         tcm_vhost_fabric_configfs = NULL;
2199         pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
2200 };
2201
2202 static int __init tcm_vhost_init(void)
2203 {
2204         int ret = -ENOMEM;
2205         /*
2206          * Use our own dedicated workqueue for submitting I/O into
2207          * target core to avoid contention within system_wq.
2208          */
2209         tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
2210         if (!tcm_vhost_workqueue)
2211                 goto out;
2212
2213         ret = vhost_scsi_register();
2214         if (ret < 0)
2215                 goto out_destroy_workqueue;
2216
2217         ret = tcm_vhost_register_configfs();
2218         if (ret < 0)
2219                 goto out_vhost_scsi_deregister;
2220
2221         return 0;
2222
2223 out_vhost_scsi_deregister:
2224         vhost_scsi_deregister();
2225 out_destroy_workqueue:
2226         destroy_workqueue(tcm_vhost_workqueue);
2227 out:
2228         return ret;
2229 };
2230
2231 static void tcm_vhost_exit(void)
2232 {
2233         tcm_vhost_deregister_configfs();
2234         vhost_scsi_deregister();
2235         destroy_workqueue(tcm_vhost_workqueue);
2236 };
2237
2238 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
2239 MODULE_ALIAS("tcm_vhost");
2240 MODULE_LICENSE("GPL");
2241 module_init(tcm_vhost_init);
2242 module_exit(tcm_vhost_exit);