1 /*******************************************************************************
2  * Filename:  target_core_alua.c
3  *
4  * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
5  *
6  * (c) Copyright 2009-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23  *
24  ******************************************************************************/
25
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/configfs.h>
29 #include <linux/export.h>
30 #include <linux/file.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <asm/unaligned.h>
34
35 #include <target/target_core_base.h>
36 #include <target/target_core_backend.h>
37 #include <target/target_core_fabric.h>
38 #include <target/target_core_configfs.h>
39
40 #include "target_core_internal.h"
41 #include "target_core_alua.h"
42 #include "target_core_ua.h"
43
44 static sense_reason_t core_alua_check_transition(int state, int valid,
45                                                  int *primary);
46 static int core_alua_set_tg_pt_secondary_state(
47                 struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
48                 struct se_port *port, int explicit, int offline);
49
50 static char *core_alua_dump_state(int state);
51
52 static u16 alua_lu_gps_counter;
53 static u32 alua_lu_gps_count;
54
55 static DEFINE_SPINLOCK(lu_gps_lock);
56 static LIST_HEAD(lu_gps_list);
57
58 struct t10_alua_lu_gp *default_lu_gp;
59
60 /*
61  * REPORT REFERRALS
62  *
63  * See sbc3r35 section 5.23
64  */
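/*
 * Sketch of the parameter data built below (offsets are inferred from
 * this code, not quoted from the spec): a 4-byte header whose bytes 2-3
 * carry RETURN DATA LENGTH, followed by one user data segment referral
 * descriptor per LBA map: 20 bytes (4-byte sub-header with the target
 * port group count in byte 3, then FIRST and LAST USER DATA SEGMENT LBA
 * as big-endian u64s), plus a 4-byte target port group descriptor
 * (ALUA state nibble + 2-byte group id) for each LBA map member.
 */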
65 sense_reason_t
66 target_emulate_report_referrals(struct se_cmd *cmd)
67 {
68         struct se_device *dev = cmd->se_dev;
69         struct t10_alua_lba_map *map;
70         struct t10_alua_lba_map_member *map_mem;
71         unsigned char *buf;
72         u32 rd_len = 0, off;
73
74         if (cmd->data_length < 4) {
75                 pr_warn("REPORT REFERRALS allocation length %u too"
76                         " small\n", cmd->data_length);
77                 return TCM_INVALID_CDB_FIELD;
78         }
79
80         buf = transport_kmap_data_sg(cmd);
81         if (!buf)
82                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
83
84         off = 4;
85         spin_lock(&dev->t10_alua.lba_map_lock);
86         if (list_empty(&dev->t10_alua.lba_map_list)) {
87                 spin_unlock(&dev->t10_alua.lba_map_lock);
88                 transport_kunmap_data_sg(cmd);
89
90                 return TCM_UNSUPPORTED_SCSI_OPCODE;
91         }
92
93         list_for_each_entry(map, &dev->t10_alua.lba_map_list,
94                             lba_map_list) {
95                 int desc_num = off + 3;
96                 int pg_num;
97
98                 off += 4;
99                 if (cmd->data_length > off)
100                         put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
101                 off += 8;
102                 if (cmd->data_length > off)
103                         put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
104                 off += 8;
105                 rd_len += 20;
106                 pg_num = 0;
107                 list_for_each_entry(map_mem, &map->lba_map_mem_list,
108                                     lba_map_mem_list) {
109                         int alua_state = map_mem->lba_map_mem_alua_state;
110                         int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
111
112                         if (cmd->data_length > off)
113                                 buf[off] = alua_state & 0x0f;
114                         off += 2;
115                         if (cmd->data_length > off)
116                                 buf[off] = (alua_pg_id >> 8) & 0xff;
117                         off++;
118                         if (cmd->data_length > off)
119                                 buf[off] = (alua_pg_id & 0xff);
120                         off++;
121                         rd_len += 4;
122                         pg_num++;
123                 }
124                 if (cmd->data_length > desc_num)
125                         buf[desc_num] = pg_num;
126         }
127         spin_unlock(&dev->t10_alua.lba_map_lock);
128
129         /*
130          * Set the RETURN DATA LENGTH in the header of the DataIN payload
131          */
132         put_unaligned_be16(rd_len, &buf[2]);
133
134         transport_kunmap_data_sg(cmd);
135
136         target_complete_cmd(cmd, GOOD);
137         return 0;
138 }
139
140 /*
141  * REPORT_TARGET_PORT_GROUPS
142  *
143  * See spc4r17 section 6.27
144  */
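/*
 * Sketch of the returned descriptor layout as built below (a description
 * of this implementation): after the 4- or 8-byte header, each target
 * port group gets an 8-byte descriptor (PREF bit + access state,
 * supported states, 2-byte TARGET PORT GROUP id, reserved, STATUS CODE,
 * vendor specific, TARGET PORT COUNT), followed by one 4-byte entry per
 * member port (2 obsolete bytes, then the 2-byte RELATIVE TARGET PORT
 * IDENTIFIER).
 */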
145 sense_reason_t
146 target_emulate_report_target_port_groups(struct se_cmd *cmd)
147 {
148         struct se_device *dev = cmd->se_dev;
149         struct se_port *port;
150         struct t10_alua_tg_pt_gp *tg_pt_gp;
151         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
152         unsigned char *buf;
153         u32 rd_len = 0, off;
154         int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
155
156         /*
157          * Skip over RESERVED area to first Target port group descriptor
158          * depending on the PARAMETER DATA FORMAT type..
159          */
160         if (ext_hdr != 0)
161                 off = 8;
162         else
163                 off = 4;
164
165         if (cmd->data_length < off) {
166                 pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
167                         " small for %s header\n", cmd->data_length,
168                         (ext_hdr) ? "extended" : "normal");
169                 return TCM_INVALID_CDB_FIELD;
170         }
171         buf = transport_kmap_data_sg(cmd);
172         if (!buf)
173                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
174
175         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
176         list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
177                         tg_pt_gp_list) {
178                 /*
179                  * Check if the Target port group and Target port descriptor list
180                  * based on tg_pt_gp_members count will fit into the response payload.
181                  * Otherwise, bump rd_len to let the initiator know we have exceeded
182                  * the allocation length and the response is truncated.
183                  */
184                 if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) >
185                      cmd->data_length) {
186                         rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4);
187                         continue;
188                 }
189                 /*
190                  * PREF: Preferred target port bit, determine if this
191                  * bit should be set for port group.
192                  */
193                 if (tg_pt_gp->tg_pt_gp_pref)
194                         buf[off] = 0x80;
195                 /*
196                  * Set the ASYMMETRIC ACCESS State
197                  */
198                 buf[off++] |= (atomic_read(
199                         &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
200                 /*
201                  * Set supported ASYMMETRIC ACCESS State bits
202                  */
203                 buf[off++] |= tg_pt_gp->tg_pt_gp_alua_supported_states;
204                 /*
205                  * TARGET PORT GROUP
206                  */
207                 buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
208                 buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
209
210                 off++; /* Skip over Reserved */
211                 /*
212                  * STATUS CODE
213                  */
214                 buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
215                 /*
216                  * Vendor Specific field
217                  */
218                 buf[off++] = 0x00;
219                 /*
220                  * TARGET PORT COUNT
221                  */
222                 buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
223                 rd_len += 8;
224
225                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
226                 list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
227                                 tg_pt_gp_mem_list) {
228                         port = tg_pt_gp_mem->tg_pt;
229                         /*
230                          * Start Target Port descriptor format
231                          *
232                          * See spc4r17 section 6.2.7 Table 247
233                          */
234                         off += 2; /* Skip over Obsolete */
235                         /*
236                          * Set RELATIVE TARGET PORT IDENTIFIER
237                          */
238                         buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
239                         buf[off++] = (port->sep_rtpi & 0xff);
240                         rd_len += 4;
241                 }
242                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
243         }
244         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
245         /*
246          * Set the RETURN DATA LENGTH in the header of the DataIN payload
247          */
248         put_unaligned_be32(rd_len, &buf[0]);
249
250         /*
251          * Fill in the Extended header parameter data format if requested
252          */
253         if (ext_hdr != 0) {
254                 buf[4] = 0x10;
255                 /*
256                  * Set the implicit transition time (in seconds) for the application
257                  * client to use as a base for its transition timeout value.
258                  *
259                  * Use the current tg_pt_gp_mem -> tg_pt_gp membership from the LUN
260                  * this CDB was received upon to determine this value individually
261                  * for the ALUA target port group.
262                  */
263                 port = cmd->se_lun->lun_sep;
264                 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
265                 if (tg_pt_gp_mem) {
266                         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
267                         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
268                         if (tg_pt_gp)
269                                 buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
270                         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
271                 }
272         }
273         transport_kunmap_data_sg(cmd);
274
275         target_complete_cmd(cmd, GOOD);
276         return 0;
277 }
278
279 /*
280  * SET_TARGET_PORT_GROUPS for explicit ALUA operation.
281  *
282  * See spc4r17 section 6.35
283  */
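/*
 * Sketch of the parameter list parsed below (as implemented here): a
 * 4-byte reserved header, then 4-byte set target port group descriptors.
 * Byte 0 bits 3:0 carry the requested ASYMMETRIC ACCESS STATE; bytes 2-3
 * carry either the TARGET PORT GROUP id (for primary states) or the
 * RELATIVE TARGET PORT IDENTIFIER (for the secondary OFFLINE state).
 */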
284 sense_reason_t
285 target_emulate_set_target_port_groups(struct se_cmd *cmd)
286 {
287         struct se_device *dev = cmd->se_dev;
288         struct se_port *port, *l_port = cmd->se_lun->lun_sep;
289         struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
290         struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
291         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
292         unsigned char *buf;
293         unsigned char *ptr;
294         sense_reason_t rc = TCM_NO_SENSE;
295         u32 len = 4; /* Skip over RESERVED area in header */
296         int alua_access_state, primary = 0, valid_states;
297         u16 tg_pt_id, rtpi;
298
299         if (!l_port)
300                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
301
302         if (cmd->data_length < 4) {
303                 pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
304                         " small\n", cmd->data_length);
305                 return TCM_INVALID_PARAMETER_LIST;
306         }
307
308         buf = transport_kmap_data_sg(cmd);
309         if (!buf)
310                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
311
312         /*
313          * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
314          * for the local tg_pt_gp.
315          */
316         l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
317         if (!l_tg_pt_gp_mem) {
318                 pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
319                 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
320                 goto out;
321         }
322         spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
323         l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
324         if (!l_tg_pt_gp) {
325                 spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
326                 pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
327                 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
328                 goto out;
329         }
330         spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
331
332         if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
333                 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
334                                 " while TPGS_EXPLICIT_ALUA is disabled\n");
335                 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
336                 goto out;
337         }
338         valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
339
340         ptr = &buf[4]; /* Skip over RESERVED area in header */
341
342         while (len < cmd->data_length) {
343                 bool found = false;
344                 alua_access_state = (ptr[0] & 0x0f);
345                 /*
346                  * Check the received ALUA access state, and determine if
347                  * the state is a primary or secondary target port asymmetric
348                  * access state.
349                  */
350                 rc = core_alua_check_transition(alua_access_state,
351                                                 valid_states, &primary);
352                 if (rc) {
353                         /*
354                          * If the SET TARGET PORT GROUPS attempts to establish
355                          * an invalid combination of target port asymmetric
356                          * access states or attempts to establish an
357                          * unsupported target port asymmetric access state,
358                          * then the command shall be terminated with CHECK
359                          * CONDITION status, with the sense key set to ILLEGAL
360                          * REQUEST, and the additional sense code set to INVALID
361                          * FIELD IN PARAMETER LIST.
362                          */
363                         goto out;
364                 }
365
366                 /*
367                  * If the ASYMMETRIC ACCESS STATE field (see table 267)
368                  * specifies a primary target port asymmetric access state,
369                  * then the TARGET PORT GROUP OR TARGET PORT field specifies
370                  * a primary target port group for which the primary target
371                  * port asymmetric access state shall be changed. If the
372                  * ASYMMETRIC ACCESS STATE field specifies a secondary target
373                  * port asymmetric access state, then the TARGET PORT GROUP OR
374                  * TARGET PORT field specifies the relative target port
375                  * identifier (see 3.1.120) of the target port for which the
376                  * secondary target port asymmetric access state shall be
377                  * changed.
378                  */
379                 if (primary) {
380                         tg_pt_id = get_unaligned_be16(ptr + 2);
381                         /*
382                          * Locate the matching target port group ID from
383                          * the device's tg_pt_gp list
384                          */
385                         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
386                         list_for_each_entry(tg_pt_gp,
387                                         &dev->t10_alua.tg_pt_gps_list,
388                                         tg_pt_gp_list) {
389                                 if (!tg_pt_gp->tg_pt_gp_valid_id)
390                                         continue;
391
392                                 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
393                                         continue;
394
395                                 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
396                                 smp_mb__after_atomic_inc();
397
398                                 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
399
400                                 if (!core_alua_do_port_transition(tg_pt_gp,
401                                                 dev, l_port, nacl,
402                                                 alua_access_state, 1))
403                                         found = true;
404
405                                 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
406                                 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
407                                 smp_mb__after_atomic_dec();
408                                 break;
409                         }
410                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
411                 } else {
412                         /*
413                          * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
414                          * the Target Port in question for the incoming
415                          * SET_TARGET_PORT_GROUPS op.
416                          */
417                         rtpi = get_unaligned_be16(ptr + 2);
418                         /*
419                          * Locate the matching relative target port identifier
420                          * for the struct se_device storage object.
421                          */
422                         spin_lock(&dev->se_port_lock);
423                         list_for_each_entry(port, &dev->dev_sep_list,
424                                                         sep_list) {
425                                 if (port->sep_rtpi != rtpi)
426                                         continue;
427
428                                 tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
429
430                                 spin_unlock(&dev->se_port_lock);
431
432                                 if (!core_alua_set_tg_pt_secondary_state(
433                                                 tg_pt_gp_mem, port, 1, 1))
434                                         found = true;
435
436                                 spin_lock(&dev->se_port_lock);
437                                 break;
438                         }
439                         spin_unlock(&dev->se_port_lock);
440                 }
441
442                 if (!found) {
443                         rc = TCM_INVALID_PARAMETER_LIST;
444                         goto out;
445                 }
446
447                 ptr += 4;
448                 len += 4;
449         }
450
451 out:
452         transport_kunmap_data_sg(cmd);
453         if (!rc)
454                 target_complete_cmd(cmd, GOOD);
455         return rc;
456 }
457
458 static inline int core_alua_state_nonoptimized(
459         struct se_cmd *cmd,
460         unsigned char *cdb,
461         int nonop_delay_msecs,
462         u8 *alua_ascq)
463 {
464         /*
465          * Set SCF_ALUA_NON_OPTIMIZED here; this value will be checked
466          * later to determine if processing of this cmd needs to be
467          * temporarily delayed for the Active/NonOptimized primary access state.
468          */
469         cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
470         cmd->alua_nonop_delay = nonop_delay_msecs;
471         return 0;
472 }
473
474 static inline int core_alua_state_lba_dependent(
475         struct se_cmd *cmd,
476         struct t10_alua_tg_pt_gp *tg_pt_gp,
477         u8 *alua_ascq)
478 {
479         struct se_device *dev = cmd->se_dev;
480         u64 segment_size, segment_mult, sectors, lba;
481
482         /* Only need to check for cdb actually containing LBAs */
483         if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
484                 return 0;
485
486         spin_lock(&dev->t10_alua.lba_map_lock);
487         segment_size = dev->t10_alua.lba_map_segment_size;
488         segment_mult = dev->t10_alua.lba_map_segment_multiplier;
489         sectors = cmd->data_length / dev->dev_attrib.block_size;
490
491         lba = cmd->t_task_lba;
492         while (lba < cmd->t_task_lba + sectors) {
493                 struct t10_alua_lba_map *cur_map = NULL, *map;
494                 struct t10_alua_lba_map_member *map_mem;
495
496                 list_for_each_entry(map, &dev->t10_alua.lba_map_list,
497                                     lba_map_list) {
498                         u64 start_lba, last_lba;
499                         u64 first_lba = map->lba_map_first_lba;
500
501                         if (segment_mult) {
502                                 u64 tmp = lba;
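                                /*
                                 * Note: do_div() divides tmp in place and
                                 * returns the remainder, i.e. the offset of
                                 * this LBA within the current segment_size *
                                 * segment_mult window, which is then matched
                                 * against the map's first/last segment LBAs.
                                 */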
503                                 start_lba = do_div(tmp, segment_size * segment_mult);
504
505                                 last_lba = first_lba + segment_size - 1;
506                                 if (start_lba >= first_lba &&
507                                     start_lba <= last_lba) {
508                                         lba += segment_size;
509                                         cur_map = map;
510                                         break;
511                                 }
512                         } else {
513                                 last_lba = map->lba_map_last_lba;
514                                 if (lba >= first_lba && lba <= last_lba) {
515                                         lba = last_lba + 1;
516                                         cur_map = map;
517                                         break;
518                                 }
519                         }
520                 }
521                 if (!cur_map) {
522                         spin_unlock(&dev->t10_alua.lba_map_lock);
523                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
524                         return 1;
525                 }
526                 list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
527                                     lba_map_mem_list) {
528                         if (map_mem->lba_map_mem_alua_pg_id !=
529                             tg_pt_gp->tg_pt_gp_id)
530                                 continue;
531                         switch(map_mem->lba_map_mem_alua_state) {
532                         case ALUA_ACCESS_STATE_STANDBY:
533                                 spin_unlock(&dev->t10_alua.lba_map_lock);
534                                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
535                                 return 1;
536                         case ALUA_ACCESS_STATE_UNAVAILABLE:
537                                 spin_unlock(&dev->t10_alua.lba_map_lock);
538                                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
539                                 return 1;
540                         default:
541                                 break;
542                         }
543                 }
544         }
545         spin_unlock(&dev->t10_alua.lba_map_lock);
546         return 0;
547 }
548
549 static inline int core_alua_state_standby(
550         struct se_cmd *cmd,
551         unsigned char *cdb,
552         u8 *alua_ascq)
553 {
554         /*
555          * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
556          * spc4r17 section 5.9.2.4.4
557          */
558         switch (cdb[0]) {
559         case INQUIRY:
560         case LOG_SELECT:
561         case LOG_SENSE:
562         case MODE_SELECT:
563         case MODE_SENSE:
564         case REPORT_LUNS:
565         case RECEIVE_DIAGNOSTIC:
566         case SEND_DIAGNOSTIC:
567                 return 0;
568         case MAINTENANCE_IN:
569                 switch (cdb[1] & 0x1f) {
570                 case MI_REPORT_TARGET_PGS:
571                         return 0;
572                 default:
573                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
574                         return 1;
575                 }
576         case MAINTENANCE_OUT:
577                 switch (cdb[1]) {
578                 case MO_SET_TARGET_PGS:
579                         return 0;
580                 default:
581                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
582                         return 1;
583                 }
584         case REQUEST_SENSE:
585         case PERSISTENT_RESERVE_IN:
586         case PERSISTENT_RESERVE_OUT:
587         case READ_BUFFER:
588         case WRITE_BUFFER:
589                 return 0;
590         default:
591                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
592                 return 1;
593         }
594
595         return 0;
596 }
597
598 static inline int core_alua_state_unavailable(
599         struct se_cmd *cmd,
600         unsigned char *cdb,
601         u8 *alua_ascq)
602 {
603         /*
604          * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
605          * spc4r17 section 5.9.2.4.5
606          */
607         switch (cdb[0]) {
608         case INQUIRY:
609         case REPORT_LUNS:
610                 return 0;
611         case MAINTENANCE_IN:
612                 switch (cdb[1] & 0x1f) {
613                 case MI_REPORT_TARGET_PGS:
614                         return 0;
615                 default:
616                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
617                         return 1;
618                 }
619         case MAINTENANCE_OUT:
620                 switch (cdb[1]) {
621                 case MO_SET_TARGET_PGS:
622                         return 0;
623                 default:
624                         *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
625                         return 1;
626                 }
627         case REQUEST_SENSE:
628         case READ_BUFFER:
629         case WRITE_BUFFER:
630                 return 0;
631         default:
632                 *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
633                 return 1;
634         }
635
636         return 0;
637 }
638
639 static inline int core_alua_state_transition(
640         struct se_cmd *cmd,
641         unsigned char *cdb,
642         u8 *alua_ascq)
643 {
644         /*
645          * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
646          * spc4r17 section 5.9.2.5
647          */
648         switch (cdb[0]) {
649         case INQUIRY:
650         case REPORT_LUNS:
651                 return 0;
652         case MAINTENANCE_IN:
653                 switch (cdb[1] & 0x1f) {
654                 case MI_REPORT_TARGET_PGS:
655                         return 0;
656                 default:
657                         *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
658                         return 1;
659                 }
660         case REQUEST_SENSE:
661         case READ_BUFFER:
662         case WRITE_BUFFER:
663                 return 0;
664         default:
665                 *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
666                 return 1;
667         }
668
669         return 0;
670 }
671
672 /*
673  * Check the ALUA primary (and struct se_port secondary) access state
674  * for the receiving port. Returns 0 on success, or a sense_reason_t
675  * (e.g. TCM_CHECK_CONDITION_NOT_READY) when the LUN is not accessible.
676  */
677 sense_reason_t
678 target_alua_state_check(struct se_cmd *cmd)
679 {
680         struct se_device *dev = cmd->se_dev;
681         unsigned char *cdb = cmd->t_task_cdb;
682         struct se_lun *lun = cmd->se_lun;
683         struct se_port *port = lun->lun_sep;
684         struct t10_alua_tg_pt_gp *tg_pt_gp;
685         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
686         int out_alua_state, nonop_delay_msecs;
687         u8 alua_ascq;
688         int ret;
689
690         if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
691                 return 0;
692         if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
693                 return 0;
694
695         if (!port)
696                 return 0;
697         /*
698          * First, check for a struct se_port specific secondary ALUA target port
699          * access state: OFFLINE
700          */
701         if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
702                 pr_debug("ALUA: Got secondary offline status for local"
703                                 " target port\n");
704                 alua_ascq = ASCQ_04H_ALUA_OFFLINE;
705                 ret = 1;
706                 goto out;
707         }
708         /*
709          * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
710          * ALUA target port group, to obtain current ALUA access state.
711          * Otherwise look for the underlying struct se_device association with
712          * an ALUA logical unit group.
713          */
714         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
715         if (!tg_pt_gp_mem)
716                 return 0;
717
718         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
719         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
720         out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
721         nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
722         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
723         /*
724          * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
725          * statement so the compiler knows explicitly to check this case first.
726          * For the Optimized ALUA access state case, we want to process the
727          * incoming fabric cmd ASAP..
728          */
729         if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED)
730                 return 0;
731
732         switch (out_alua_state) {
733         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
734                 ret = core_alua_state_nonoptimized(cmd, cdb,
735                                         nonop_delay_msecs, &alua_ascq);
736                 break;
737         case ALUA_ACCESS_STATE_STANDBY:
738                 ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
739                 break;
740         case ALUA_ACCESS_STATE_UNAVAILABLE:
741                 ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
742                 break;
743         case ALUA_ACCESS_STATE_TRANSITION:
744                 ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
745                 break;
746         case ALUA_ACCESS_STATE_LBA_DEPENDENT:
747                 ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
748                 break;
749         /*
750          * OFFLINE is a secondary ALUA target port group access state that is
751          * handled above with struct se_port->sep_tg_pt_secondary_offline=1
752          */
753         case ALUA_ACCESS_STATE_OFFLINE:
754         default:
755                 pr_err("Unknown ALUA access state: 0x%02x\n",
756                                 out_alua_state);
757                 return TCM_INVALID_CDB_FIELD;
758         }
759
760 out:
761         if (ret > 0) {
762                 /*
763                  * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
764                  * the ALUA additional sense code qualifier (ASCQ) is determined
765                  * by the ALUA primary or secondary access state..
766                  */
767                 pr_debug("[%s]: ALUA TG Port not available, "
768                         "SenseKey: NOT_READY, ASC/ASCQ: "
769                         "0x04/0x%02x\n",
770                         cmd->se_tfo->get_fabric_name(), alua_ascq);
771
772                 cmd->scsi_asc = 0x04;
773                 cmd->scsi_ascq = alua_ascq;
774                 return TCM_CHECK_CONDITION_NOT_READY;
775         }
776
777         return 0;
778 }
779
780 /*
781  * Check implicit and explicit ALUA state change requests.
782  */
783 static sense_reason_t
784 core_alua_check_transition(int state, int valid, int *primary)
785 {
786         /*
787          * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
788          * defined as primary target port asymmetric access states.
789          */
790         switch (state) {
791         case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
792                 if (!(valid & ALUA_AO_SUP))
793                         goto not_supported;
794                 *primary = 1;
795                 break;
796         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
797                 if (!(valid & ALUA_AN_SUP))
798                         goto not_supported;
799                 *primary = 1;
800                 break;
801         case ALUA_ACCESS_STATE_STANDBY:
802                 if (!(valid & ALUA_S_SUP))
803                         goto not_supported;
804                 *primary = 1;
805                 break;
806         case ALUA_ACCESS_STATE_UNAVAILABLE:
807                 if (!(valid & ALUA_U_SUP))
808                         goto not_supported;
809                 *primary = 1;
810                 break;
811         case ALUA_ACCESS_STATE_LBA_DEPENDENT:
812                 if (!(valid & ALUA_LBD_SUP))
813                         goto not_supported;
814                 *primary = 1;
815                 break;
816         case ALUA_ACCESS_STATE_OFFLINE:
817                 /*
818                  * OFFLINE state is defined as a secondary target port
819                  * asymmetric access state.
820                  */
821                 if (!(valid & ALUA_O_SUP))
822                         goto not_supported;
823                 *primary = 0;
824                 break;
825         case ALUA_ACCESS_STATE_TRANSITION:
826                 /*
827                  * Transitioning is set internally, and
828                  * cannot be selected manually.
829                  */
830                 goto not_supported;
831         default:
832                 pr_err("Unknown ALUA access state: 0x%02x\n", state);
833                 return TCM_INVALID_PARAMETER_LIST;
834         }
835
836         return 0;
837
838 not_supported:
839         pr_err("ALUA access state %s not supported\n",
840                core_alua_dump_state(state));
841         return TCM_INVALID_PARAMETER_LIST;
842 }
843
844 static char *core_alua_dump_state(int state)
845 {
846         switch (state) {
847         case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
848                 return "Active/Optimized";
849         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
850                 return "Active/NonOptimized";
851         case ALUA_ACCESS_STATE_LBA_DEPENDENT:
852                 return "LBA Dependent";
853         case ALUA_ACCESS_STATE_STANDBY:
854                 return "Standby";
855         case ALUA_ACCESS_STATE_UNAVAILABLE:
856                 return "Unavailable";
857         case ALUA_ACCESS_STATE_OFFLINE:
858                 return "Offline";
859         case ALUA_ACCESS_STATE_TRANSITION:
860                 return "Transitioning";
861         default:
862                 return "Unknown";
863         }
864
865         return NULL;
866 }
867
868 char *core_alua_dump_status(int status)
869 {
870         switch (status) {
871         case ALUA_STATUS_NONE:
872                 return "None";
873         case ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG:
874                 return "Altered by Explicit STPG";
875         case ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA:
876                 return "Altered by Implicit ALUA";
877         default:
878                 return "Unknown";
879         }
880
881         return NULL;
882 }
883
884 /*
885  * Used by fabric modules to determine when we need to delay processing
886  * for the Active/NonOptimized paths..
887  */
888 int core_alua_check_nonop_delay(
889         struct se_cmd *cmd)
890 {
891         if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
892                 return 0;
893         if (in_interrupt())
894                 return 0;
895         /*
896          * The ALUA Active/NonOptimized access state delay can be disabled
897          * via configfs with a value of zero.
898          */
899         if (!cmd->alua_nonop_delay)
900                 return 0;
901         /*
902          * struct se_cmd->alua_nonop_delay gets set by a target port group
903          * defined interval in core_alua_state_nonoptimized()
904          */
905         msleep_interruptible(cmd->alua_nonop_delay);
906         return 0;
907 }
908 EXPORT_SYMBOL(core_alua_check_nonop_delay);
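/*
 * A minimal sketch of a call site, assuming a hypothetical fabric
 * driver (example_queue_status() and example_send_response() are
 * illustrative names, not in-tree code): a fabric module applies the
 * delay once per command before completing it back to the initiator:
 *
 *	static int example_queue_status(struct se_cmd *se_cmd)
 *	{
 *		core_alua_check_nonop_delay(se_cmd);
 *		return example_send_response(se_cmd);
 *	}
 */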
909
910 /*
911  * Called with tg_pt_gp->tg_pt_gp_md_mutex or tg_pt_gp_mem->sep_tg_pt_md_mutex
912  *
913  */
914 static int core_alua_write_tpg_metadata(
915         const char *path,
916         unsigned char *md_buf,
917         u32 md_buf_len)
918 {
919         struct file *file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
920         int ret;
921
922         if (IS_ERR(file)) {
923                 pr_err("filp_open(%s) for ALUA metadata failed\n", path);
924                 return -ENODEV;
925         }
926         ret = kernel_write(file, md_buf, md_buf_len, 0);
927         if (ret < 0)
928                 pr_err("Error writing ALUA metadata file: %s\n", path);
929         fput(file);
930         return (ret < 0) ? -EIO : 0;
931 }
932
933 /*
934  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
935  */
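/*
 * Example of the resulting metadata file (illustrative values; the path
 * is built from the T10 unit serial and the configfs group name below):
 *
 *	/var/target/alua/tpgs_<unit_serial>/<tg_pt_gp_name>:
 *		tg_pt_gp_id=1
 *		alua_access_state=0x01
 *		alua_access_status=0x01
 */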
936 static int core_alua_update_tpg_primary_metadata(
937         struct t10_alua_tg_pt_gp *tg_pt_gp)
938 {
939         unsigned char *md_buf;
940         struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
941         char path[ALUA_METADATA_PATH_LEN];
942         int len, rc;
943
944         md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
945         if (!md_buf) {
946                 pr_err("Unable to allocate buf for ALUA metadata\n");
947                 return -ENOMEM;
948         }
949
950         memset(path, 0, ALUA_METADATA_PATH_LEN);
951
952         len = snprintf(md_buf, ALUA_MD_BUF_LEN,
953                         "tg_pt_gp_id=%hu\n"
954                         "alua_access_state=0x%02x\n"
955                         "alua_access_status=0x%02x\n",
956                         tg_pt_gp->tg_pt_gp_id,
957                         tg_pt_gp->tg_pt_gp_alua_pending_state,
958                         tg_pt_gp->tg_pt_gp_alua_access_status);
959
960         snprintf(path, ALUA_METADATA_PATH_LEN,
961                 "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
962                 config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
963
964         rc = core_alua_write_tpg_metadata(path, md_buf, len);
965         kfree(md_buf);
966         return rc;
967 }
968
969 static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
970 {
971         struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
972                 struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
973         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
974         struct se_dev_entry *se_deve;
975         struct se_lun_acl *lacl;
976         struct se_port *port;
977         struct t10_alua_tg_pt_gp_member *mem;
978         bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
979                          ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
980
981         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
982         list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
983                                 tg_pt_gp_mem_list) {
984                 port = mem->tg_pt;
985                 /*
986                  * After an implicit target port asymmetric access state
987                  * change, a device server shall establish a unit attention
988                  * condition for the initiator port associated with every I_T
989                  * nexus with the additional sense code set to ASYMMETRIC
990                  * ACCESS STATE CHANGED.
991                  *
992                  * After an explicit target port asymmetric access state
993                  * change, a device server shall establish a unit attention
994                  * condition with the additional sense code set to ASYMMETRIC
995                  * ACCESS STATE CHANGED for the initiator port associated with
996                  * every I_T nexus other than the I_T nexus on which the SET
997                          * TARGET PORT GROUPS command was received.
998                  */
999                 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
1000                 smp_mb__after_atomic_inc();
1001                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1002
1003                 spin_lock_bh(&port->sep_alua_lock);
1004                 list_for_each_entry(se_deve, &port->sep_alua_list,
1005                                         alua_port_list) {
1006                         lacl = se_deve->se_lun_acl;
1007                         /*
1008                          * se_deve->se_lun_acl pointer may be NULL for an
1009                          * entry created without explicit Node+MappedLUN ACLs
1010                          */
1011                         if (!lacl)
1012                                 continue;
1013
1014                         if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
1015                              ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
1016                            (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
1017                             (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
1018                            (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
1019                             (tg_pt_gp->tg_pt_gp_alua_port == port))
1020                                 continue;
1021
1022                         core_scsi3_ua_allocate(lacl->se_lun_nacl,
1023                                 se_deve->mapped_lun, 0x2A,
1024                                 ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
1025                 }
1026                 spin_unlock_bh(&port->sep_alua_lock);
1027
1028                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1029                 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
1030                 smp_mb__after_atomic_dec();
1031         }
1032         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1033         /*
1034          * Update the ALUA metadata buf that has been allocated in
1035          * core_alua_do_port_transition(); this metadata will be written
1036          * to struct file.
1037          *
1038          * Note there is a case where we do not want to update the
1039          * metadata: when saved metadata is being parsed by userspace
1040          * to restore the existing port access state and access status.
1041          *
1042          * Also note that the failure to write out the ALUA metadata to
1043          * struct file does NOT affect the actual ALUA transition.
1044          */
1045         if (tg_pt_gp->tg_pt_gp_write_metadata) {
1046                 mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
1047                 core_alua_update_tpg_primary_metadata(tg_pt_gp);
1048                 mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
1049         }
1050         /*
1051          * Set the current primary ALUA access state to the requested new state
1052          */
1053         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1054                    tg_pt_gp->tg_pt_gp_alua_pending_state);
1055
1056         pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1057                 " from primary access state %s to %s\n", (explicit) ? "explicit" :
1058                 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1059                 tg_pt_gp->tg_pt_gp_id,
1060                 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
1061                 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1062         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1063         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1064         smp_mb__after_atomic_dec();
1065         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1066
1067         if (tg_pt_gp->tg_pt_gp_transition_complete)
1068                 complete(tg_pt_gp->tg_pt_gp_transition_complete);
1069 }
1070
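/*
 * Summary of the transition flow implemented below (a description of
 * this code, not of the SPC-4 state machine in general): the previous
 * state is saved, the group is moved to ALUA_ACCESS_STATE_TRANSITION,
 * and the actual switch to the pending state happens in the delayed
 * work above, which also raises the ASYMMETRIC ACCESS STATE CHANGED
 * unit attentions and optionally writes the primary metadata. Explicit
 * (STPG) requests block on a completion; implicit transitions with
 * tg_pt_gp_implicit_trans_secs set are queued and finish asynchronously.
 */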
1071 static int core_alua_do_transition_tg_pt(
1072         struct t10_alua_tg_pt_gp *tg_pt_gp,
1073         int new_state,
1074         int explicit)
1075 {
1076         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1077         DECLARE_COMPLETION_ONSTACK(wait);
1078
1079         /* Nothing to be done here */
1080         if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
1081                 return 0;
1082
1083         if (new_state == ALUA_ACCESS_STATE_TRANSITION)
1084                 return -EAGAIN;
1085
1086         /*
1087          * Flush any pending transitions
1088          */
1089         if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
1090             atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
1091             ALUA_ACCESS_STATE_TRANSITION) {
1092                 /* Just in case */
1093                 tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1094                 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
1095                 flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1096                 wait_for_completion(&wait);
1097                 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
1098                 return 0;
1099         }
1100
1101         /*
1102          * Save the old primary ALUA access state, and set the current state
1103          * to ALUA_ACCESS_STATE_TRANSITION.
1104          */
1105         tg_pt_gp->tg_pt_gp_alua_previous_state =
1106                 atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
1107         tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
1108
1109         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1110                         ALUA_ACCESS_STATE_TRANSITION);
1111         tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
1112                                 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1113                                 ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1114
1115         /*
1116          * Check for the optional ALUA primary state transition delay
1117          */
1118         if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
1119                 msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
1120
1121         /*
1122          * Take a reference for workqueue item
1123          */
1124         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1125         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1126         smp_mb__after_atomic_inc();
1127         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1128
1129         if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
1130                 unsigned long transition_tmo;
1131
1132                 transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
1133                 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1134                                    &tg_pt_gp->tg_pt_gp_transition_work,
1135                                    transition_tmo);
1136         } else {
1137                 tg_pt_gp->tg_pt_gp_transition_complete = &wait;
1138                 queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
1139                                    &tg_pt_gp->tg_pt_gp_transition_work, 0);
1140                 wait_for_completion(&wait);
1141                 tg_pt_gp->tg_pt_gp_transition_complete = NULL;
1142         }
1143
1144         return 0;
1145 }
1146
1147 int core_alua_do_port_transition(
1148         struct t10_alua_tg_pt_gp *l_tg_pt_gp,
1149         struct se_device *l_dev,
1150         struct se_port *l_port,
1151         struct se_node_acl *l_nacl,
1152         int new_state,
1153         int explicit)
1154 {
1155         struct se_device *dev;
1156         struct t10_alua_lu_gp *lu_gp;
1157         struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
1158         struct t10_alua_tg_pt_gp *tg_pt_gp;
1159         int primary, valid_states, rc = 0;
1160
1161         valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
1162         if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
1163                 return -EINVAL;
1164
1165         local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
1166         spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1167         lu_gp = local_lu_gp_mem->lu_gp;
1168         atomic_inc(&lu_gp->lu_gp_ref_cnt);
1169         smp_mb__after_atomic_inc();
1170         spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1171         /*
1172          * For storage objects that are members of the 'default_lu_gp',
1173          * we only do transition on the passed *l_tg_pt_gp, and not
1174          * on all of the matching target port groups IDs in default_lu_gp.
1175          */
1176         if (!lu_gp->lu_gp_id) {
1177                 /*
1178                  * core_alua_do_transition_tg_pt() will always return
1179                  * success.
1180                  */
1181                 l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
1182                 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1183                 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1184                                                    new_state, explicit);
1185                 atomic_dec(&lu_gp->lu_gp_ref_cnt);
1186                 smp_mb__after_atomic_dec();
1187                 return rc;
1188         }
1189         /*
1190          * For all other LU groups aside from 'default_lu_gp', walk all of
1191          * the associated storage objects looking for a matching target port
1192          * group ID from the local target port group.
1193          */
1194         spin_lock(&lu_gp->lu_gp_lock);
1195         list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
1196                                 lu_gp_mem_list) {
1197
1198                 dev = lu_gp_mem->lu_gp_mem_dev;
1199                 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
1200                 smp_mb__after_atomic_inc();
1201                 spin_unlock(&lu_gp->lu_gp_lock);
1202
1203                 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1204                 list_for_each_entry(tg_pt_gp,
1205                                 &dev->t10_alua.tg_pt_gps_list,
1206                                 tg_pt_gp_list) {
1207
1208                         if (!tg_pt_gp->tg_pt_gp_valid_id)
1209                                 continue;
1210                         /*
1211                          * If the target port asymmetric access state is
1212                          * changed for any target port group accessible via
1213                          * a logical unit within a LU group, the target port
1214                          * group asymmetric access states for the same
1215                          * target port group accessible via other logical units
1216                          * in that LU group will also change.
1217                          */
1218                         if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
1219                                 continue;
1220
1221                         if (l_tg_pt_gp == tg_pt_gp) {
1222                                 tg_pt_gp->tg_pt_gp_alua_port = l_port;
1223                                 tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1224                         } else {
1225                                 tg_pt_gp->tg_pt_gp_alua_port = NULL;
1226                                 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1227                         }
1228                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1229                         smp_mb__after_atomic_inc();
1230                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1231                         /*
1232                          * core_alua_do_transition_tg_pt() will always return
1233                          * success.
1234                          */
1235                         rc = core_alua_do_transition_tg_pt(tg_pt_gp,
1236                                         new_state, explicit);
1237
1238                         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1239                         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1240                         smp_mb__after_atomic_dec();
1241                         if (rc)
1242                                 break;
1243                 }
1244                 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1245
1246                 spin_lock(&lu_gp->lu_gp_lock);
1247                 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
1248                 smp_mb__after_atomic_dec();
1249         }
1250         spin_unlock(&lu_gp->lu_gp_lock);
1251
1252         if (!rc) {
1253                 pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
1254                          " Group IDs: %hu %s transition to primary state: %s\n",
1255                          config_item_name(&lu_gp->lu_gp_group.cg_item),
1256                          l_tg_pt_gp->tg_pt_gp_id,
1257                          (explicit) ? "explicit" : "implicit",
1258                          core_alua_dump_state(new_state));
1259         }
1260
1261         atomic_dec(&lu_gp->lu_gp_ref_cnt);
1262         smp_mb__after_atomic_dec();
1263         return rc;
1264 }
1265
1266 /*
1267  * Called with tg_pt_gp_mem->sep_tg_pt_md_mutex held
1268  */
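/*
 * Example of the resulting metadata file (illustrative values; the path
 * encodes fabric name, target WWN plus optional TPG tag, and the
 * unpacked LUN, as built below):
 *
 *	/var/target/alua/<fabric>/<wwn>+<tag>/lun_0:
 *		alua_tg_pt_offline=1
 *		alua_tg_pt_status=0x01
 */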
1269 static int core_alua_update_tpg_secondary_metadata(
1270         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1271         struct se_port *port)
1272 {
1273         unsigned char *md_buf;
1274         struct se_portal_group *se_tpg = port->sep_tpg;
1275         char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
1276         int len, rc;
1277
1278         md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
1279         if (!md_buf) {
1280                 pr_err("Unable to allocate buf for ALUA metadata\n");
1281                 return -ENOMEM;
1282         }
1283
1284         memset(path, 0, ALUA_METADATA_PATH_LEN);
1285         memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
1286
1287         len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
1288                         se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
1289
1290         if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
1291                 snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
1292                                 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
1293
1294         len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
1295                         "alua_tg_pt_status=0x%02x\n",
1296                         atomic_read(&port->sep_tg_pt_secondary_offline),
1297                         port->sep_tg_pt_secondary_stat);
1298
1299         snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
1300                         se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
1301                         port->sep_lun->unpacked_lun);
1302
1303         rc = core_alua_write_tpg_metadata(path, md_buf, len);
1304         kfree(md_buf);
1305
1306         return rc;
1307 }
1308
1309 static int core_alua_set_tg_pt_secondary_state(
1310         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1311         struct se_port *port,
1312         int explicit,
1313         int offline)
1314 {
1315         struct t10_alua_tg_pt_gp *tg_pt_gp;
1316         int trans_delay_msecs;
1317
1318         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1319         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1320         if (!tg_pt_gp) {
1321                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1322                 pr_err("Unable to complete secondary state"
1323                                 " transition\n");
1324                 return -EINVAL;
1325         }
1326         trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
1327         /*
1328          * Set the secondary ALUA target port access state to OFFLINE
1329          * or release the previous secondary state for struct se_port
1330          */
1331         if (offline)
1332                 atomic_set(&port->sep_tg_pt_secondary_offline, 1);
1333         else
1334                 atomic_set(&port->sep_tg_pt_secondary_offline, 0);
1335
1336         port->sep_tg_pt_secondary_stat = (explicit) ?
1337                         ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
1338                         ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
1339
1340         pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
1341                 " to secondary access state: %s\n", (explicit) ? "explicit" :
1342                 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
1343                 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
1344
1345         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1346         /*
1347          * Do the optional transition delay after we set the secondary
1348          * ALUA access state.
1349          */
1350         if (trans_delay_msecs != 0)
1351                 msleep_interruptible(trans_delay_msecs);
1352         /*
1353          * See if we need to update the ALUA fabric port metadata for
1354          * secondary state and status
1355          */
1356         if (port->sep_tg_pt_secondary_write_md) {
1357                 mutex_lock(&port->sep_tg_pt_md_mutex);
1358                 core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
1359                 mutex_unlock(&port->sep_tg_pt_md_mutex);
1360         }
1361
1362         return 0;
1363 }
1364
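/*
 * Allocate one referral segment covering LBAs [first_lba, last_lba] and
 * link it onto the caller's list.  A minimal usage sketch, with error
 * handling omitted and the pg_id chosen purely for illustration:
 *
 *   struct t10_alua_lba_map *map;
 *
 *   map = core_alua_allocate_lba_map(&lba_list, 0, 0xffffff);
 *   if (!IS_ERR(map))
 *           core_alua_allocate_lba_map_mem(map, 1,
 *                          ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 */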
1365 struct t10_alua_lba_map *
1366 core_alua_allocate_lba_map(struct list_head *list,
1367                            u64 first_lba, u64 last_lba)
1368 {
1369         struct t10_alua_lba_map *lba_map;
1370
1371         lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
1372         if (!lba_map) {
1373                 pr_err("Unable to allocate struct t10_alua_lba_map\n");
1374                 return ERR_PTR(-ENOMEM);
1375         }
1376         INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
1377         lba_map->lba_map_first_lba = first_lba;
1378         lba_map->lba_map_last_lba = last_lba;
1379
1380         list_add_tail(&lba_map->lba_map_list, list);
1381         return lba_map;
1382 }
1383
1384 int
1385 core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
1386                                int pg_id, int state)
1387 {
1388         struct t10_alua_lba_map_member *lba_map_mem;
1389
1390         list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
1391                             lba_map_mem_list) {
1392                 if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
1393                         pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
1394                         return -EINVAL;
1395                 }
1396         }
1397
1398         lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
1399         if (!lba_map_mem) {
1400                 pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
1401                 return -ENOMEM;
1402         }
1403         lba_map_mem->lba_map_mem_alua_state = state;
1404         lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
1405
1406         list_add_tail(&lba_map_mem->lba_map_mem_list,
1407                       &lba_map->lba_map_mem_list);
1408         return 0;
1409 }
1410
1411 void
1412 core_alua_free_lba_map(struct list_head *lba_list)
1413 {
1414         struct t10_alua_lba_map *lba_map, *lba_map_tmp;
1415         struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
1416
1417         list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
1418                                  lba_map_list) {
1419                 list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
1420                                          &lba_map->lba_map_mem_list,
1421                                          lba_map_mem_list) {
1422                         list_del(&lba_map_mem->lba_map_mem_list);
1423                         kmem_cache_free(t10_alua_lba_map_mem_cache,
1424                                         lba_map_mem);
1425                 }
1426                 list_del(&lba_map->lba_map_list);
1427                 kmem_cache_free(t10_alua_lba_map_cache, lba_map);
1428         }
1429 }
1430
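/*
 * Swap in a new referral LBA map list (or clear the current one when
 * lba_map_list is NULL), then set or clear the LBA dependent (LBD) bit
 * in the supported states of every tg_pt_gp with a valid ID to match.
 * The old list is freed outside of lba_map_lock.
 */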
1431 void
1432 core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
1433                       int segment_size, int segment_mult)
1434 {
1435         struct list_head old_lba_map_list;
1436         struct t10_alua_tg_pt_gp *tg_pt_gp;
1437         int activate = 0, supported;
1438
1439         INIT_LIST_HEAD(&old_lba_map_list);
1440         spin_lock(&dev->t10_alua.lba_map_lock);
1441         dev->t10_alua.lba_map_segment_size = segment_size;
1442         dev->t10_alua.lba_map_segment_multiplier = segment_mult;
1443         list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
1444         if (lba_map_list) {
1445                 list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
1446                 activate = 1;
1447         }
1448         spin_unlock(&dev->t10_alua.lba_map_lock);
1449         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1450         list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1451                             tg_pt_gp_list) {
1453                 if (!tg_pt_gp->tg_pt_gp_valid_id)
1454                         continue;
1455                 supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
1456                 if (activate)
1457                         supported |= ALUA_LBD_SUP;
1458                 else
1459                         supported &= ~ALUA_LBD_SUP;
1460                 tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
1461         }
1462         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1463         core_alua_free_lba_map(&old_lba_map_list);
1464 }
1465
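/*
 * Allocate an ALUA logical unit group.  With def_group set (as for the
 * statically created default_lu_gp), an ID is assigned immediately;
 * otherwise the group remains without a valid ID until
 * core_alua_set_lu_gp_id() is called from configfs.
 */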
1466 struct t10_alua_lu_gp *
1467 core_alua_allocate_lu_gp(const char *name, int def_group)
1468 {
1469         struct t10_alua_lu_gp *lu_gp;
1470
1471         lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
1472         if (!lu_gp) {
1473                 pr_err("Unable to allocate struct t10_alua_lu_gp\n");
1474                 return ERR_PTR(-ENOMEM);
1475         }
1476         INIT_LIST_HEAD(&lu_gp->lu_gp_node);
1477         INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
1478         spin_lock_init(&lu_gp->lu_gp_lock);
1479         atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
1480
1481         if (def_group) {
1482                 lu_gp->lu_gp_id = alua_lu_gps_counter++;
1483                 lu_gp->lu_gp_valid_id = 1;
1484                 alua_lu_gps_count++;
1485         }
1486
1487         return lu_gp;
1488 }
1489
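/*
 * Assign the logical unit group ID: a nonzero lu_gp_id is used as-is and
 * must be unique, while lu_gp_id == 0 requests the next free generated
 * ID.  A typical call, letting the ID be picked automatically:
 *
 *   if (core_alua_set_lu_gp_id(lu_gp, 0) < 0)
 *           return -EINVAL;
 */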
1490 int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
1491 {
1492         struct t10_alua_lu_gp *lu_gp_tmp;
1493         u16 lu_gp_id_tmp;
1494         /*
1495          * The lu_gp->lu_gp_id may only be set once.
1496          */
1497         if (lu_gp->lu_gp_valid_id) {
1498                 pr_warn("ALUA LU Group already has a valid ID,"
1499                         " ignoring request\n");
1500                 return -EINVAL;
1501         }
1502
1503         spin_lock(&lu_gps_lock);
1504         if (alua_lu_gps_count == 0x0000ffff) {
1505                 pr_err("Maximum ALUA alua_lu_gps_count:"
1506                                 " 0x0000ffff reached\n");
1507                 spin_unlock(&lu_gps_lock);
1508                 kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1509                 return -ENOSPC;
1510         }
1511 again:
1512         lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
1513                                 alua_lu_gps_counter++;
1514
1515         list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
1516                 if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
1517                         if (!lu_gp_id)
1518                                 goto again;
1519
1520                         pr_warn("ALUA Logical Unit Group ID: %hu"
1521                                 " already exists, ignoring request\n",
1522                                 lu_gp_id);
1523                         spin_unlock(&lu_gps_lock);
1524                         return -EINVAL;
1525                 }
1526         }
1527
1528         lu_gp->lu_gp_id = lu_gp_id_tmp;
1529         lu_gp->lu_gp_valid_id = 1;
1530         list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
1531         alua_lu_gps_count++;
1532         spin_unlock(&lu_gps_lock);
1533
1534         return 0;
1535 }
1536
1537 static struct t10_alua_lu_gp_member *
1538 core_alua_allocate_lu_gp_mem(struct se_device *dev)
1539 {
1540         struct t10_alua_lu_gp_member *lu_gp_mem;
1541
1542         lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
1543         if (!lu_gp_mem) {
1544                 pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
1545                 return ERR_PTR(-ENOMEM);
1546         }
1547         INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
1548         spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
1549         atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
1550
1551         lu_gp_mem->lu_gp_mem_dev = dev;
1552         dev->dev_alua_lu_gp_mem = lu_gp_mem;
1553
1554         return lu_gp_mem;
1555 }
1556
1557 void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
1558 {
1559         struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
1560         /*
1561          * Once we have reached this point, config_item_put() has
1562          * already been called from target_core_alua_drop_lu_gp().
1563          *
1564          * Here, we remove the *lu_gp from the global list so that
1565          * no associations can be made while we are releasing
1566          * struct t10_alua_lu_gp.
1567          */
1568         spin_lock(&lu_gps_lock);
1569         list_del(&lu_gp->lu_gp_node);
1570         alua_lu_gps_count--;
1571         spin_unlock(&lu_gps_lock);
1572         /*
1573          * Allow a struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
1574          * in target_core_configfs.c:target_core_store_alua_lu_gp() to be
1575          * released with core_alua_put_lu_gp_from_name().
1576          */
1577         while (atomic_read(&lu_gp->lu_gp_ref_cnt))
1578                 cpu_relax();
1579         /*
1580          * Release reference to struct t10_alua_lu_gp * from all associated
1581          * struct se_device.
1582          */
1583         spin_lock(&lu_gp->lu_gp_lock);
1584         list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
1585                                 &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
1586                 if (lu_gp_mem->lu_gp_assoc) {
1587                         list_del(&lu_gp_mem->lu_gp_mem_list);
1588                         lu_gp->lu_gp_members--;
1589                         lu_gp_mem->lu_gp_assoc = 0;
1590                 }
1591                 spin_unlock(&lu_gp->lu_gp_lock);
1592                 /*
1594                  * lu_gp_mem is associated with a single
1595                  * struct se_device->dev_alua_lu_gp_mem, and is released when
1596                  * struct se_device is released via core_alua_free_lu_gp_mem().
1597                  *
1598                  * If the passed lu_gp does NOT match the default_lu_gp, assume
1599                  * we want to re-associate a given lu_gp_mem with default_lu_gp.
1600                  */
1601                 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1602                 if (lu_gp != default_lu_gp)
1603                         __core_alua_attach_lu_gp_mem(lu_gp_mem,
1604                                         default_lu_gp);
1605                 else
1606                         lu_gp_mem->lu_gp = NULL;
1607                 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1608
1609                 spin_lock(&lu_gp->lu_gp_lock);
1610         }
1611         spin_unlock(&lu_gp->lu_gp_lock);
1612
1613         kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
1614 }
1615
1616 void core_alua_free_lu_gp_mem(struct se_device *dev)
1617 {
1618         struct t10_alua_lu_gp *lu_gp;
1619         struct t10_alua_lu_gp_member *lu_gp_mem;
1620
1621         lu_gp_mem = dev->dev_alua_lu_gp_mem;
1622         if (!lu_gp_mem)
1623                 return;
1624
1625         while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
1626                 cpu_relax();
1627
1628         spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1629         lu_gp = lu_gp_mem->lu_gp;
1630         if (lu_gp) {
1631                 spin_lock(&lu_gp->lu_gp_lock);
1632                 if (lu_gp_mem->lu_gp_assoc) {
1633                         list_del(&lu_gp_mem->lu_gp_mem_list);
1634                         lu_gp->lu_gp_members--;
1635                         lu_gp_mem->lu_gp_assoc = 0;
1636                 }
1637                 spin_unlock(&lu_gp->lu_gp_lock);
1638                 lu_gp_mem->lu_gp = NULL;
1639         }
1640         spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
1641
1642         kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
1643 }
1644
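/*
 * Look up an ALUA logical unit group by its configfs item name.  On a
 * match a lu_gp_ref_cnt reference is taken, which the caller is expected
 * to drop again via core_alua_put_lu_gp_from_name().
 */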
1645 struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
1646 {
1647         struct t10_alua_lu_gp *lu_gp;
1648         struct config_item *ci;
1649
1650         spin_lock(&lu_gps_lock);
1651         list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
1652                 if (!lu_gp->lu_gp_valid_id)
1653                         continue;
1654                 ci = &lu_gp->lu_gp_group.cg_item;
1655                 if (!strcmp(config_item_name(ci), name)) {
1656                         atomic_inc(&lu_gp->lu_gp_ref_cnt);
1657                         spin_unlock(&lu_gps_lock);
1658                         return lu_gp;
1659                 }
1660         }
1661         spin_unlock(&lu_gps_lock);
1662
1663         return NULL;
1664 }
1665
1666 void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
1667 {
1668         spin_lock(&lu_gps_lock);
1669         atomic_dec(&lu_gp->lu_gp_ref_cnt);
1670         spin_unlock(&lu_gps_lock);
1671 }
1672
1673 /*
1674  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1675  */
1676 void __core_alua_attach_lu_gp_mem(
1677         struct t10_alua_lu_gp_member *lu_gp_mem,
1678         struct t10_alua_lu_gp *lu_gp)
1679 {
1680         spin_lock(&lu_gp->lu_gp_lock);
1681         lu_gp_mem->lu_gp = lu_gp;
1682         lu_gp_mem->lu_gp_assoc = 1;
1683         list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
1684         lu_gp->lu_gp_members++;
1685         spin_unlock(&lu_gp->lu_gp_lock);
1686 }
1687
1688 /*
1689  * Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
1690  */
1691 void __core_alua_drop_lu_gp_mem(
1692         struct t10_alua_lu_gp_member *lu_gp_mem,
1693         struct t10_alua_lu_gp *lu_gp)
1694 {
1695         spin_lock(&lu_gp->lu_gp_lock);
1696         list_del(&lu_gp_mem->lu_gp_mem_list);
1697         lu_gp_mem->lu_gp = NULL;
1698         lu_gp_mem->lu_gp_assoc = 0;
1699         lu_gp->lu_gp_members--;
1700         spin_unlock(&lu_gp->lu_gp_lock);
1701 }
1702
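/*
 * Allocate an ALUA target port group with the defaults set up below:
 * implicit and explicit ALUA both enabled, an initial primary access
 * state of Active/Optimized, and the stock delay values.  As with LU
 * groups, def_group assigns an ID immediately (for default_tg_pt_gp);
 * all other groups get theirs later via core_alua_set_tg_pt_gp_id().
 */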
1703 struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
1704                 const char *name, int def_group)
1705 {
1706         struct t10_alua_tg_pt_gp *tg_pt_gp;
1707
1708         tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
1709         if (!tg_pt_gp) {
1710                 pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
1711                 return NULL;
1712         }
1713         INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
1714         INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
1715         mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
1716         spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
1717         atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
1718         INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
1719                           core_alua_do_transition_tg_pt_work);
1720         tg_pt_gp->tg_pt_gp_dev = dev;
1721         atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
1722                 ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
1723         /*
1724          * Enable both explicit and implicit ALUA support by default
1725          */
1726         tg_pt_gp->tg_pt_gp_alua_access_type =
1727                         TPGS_EXPLICIT_ALUA | TPGS_IMPLICIT_ALUA;
1728         /*
1729          * Set the default Active/NonOptimized Delay in milliseconds
1730          */
1731         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
1732         tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
1733         tg_pt_gp->tg_pt_gp_implicit_trans_secs = ALUA_DEFAULT_IMPLICIT_TRANS_SECS;
1734
1735         /*
1736          * Enable all supported states except LBD; see core_alua_set_lba_map()
1737          */
1738         tg_pt_gp->tg_pt_gp_alua_supported_states =
1739             ALUA_T_SUP | ALUA_O_SUP |
1740             ALUA_U_SUP | ALUA_S_SUP | ALUA_AN_SUP | ALUA_AO_SUP;
1741
1742         if (def_group) {
1743                 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1744                 tg_pt_gp->tg_pt_gp_id =
1745                                 dev->t10_alua.alua_tg_pt_gps_counter++;
1746                 tg_pt_gp->tg_pt_gp_valid_id = 1;
1747                 dev->t10_alua.alua_tg_pt_gps_count++;
1748                 list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1749                               &dev->t10_alua.tg_pt_gps_list);
1750                 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1751         }
1752
1753         return tg_pt_gp;
1754 }
1755
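/*
 * Assign the target port group ID using the same scheme as
 * core_alua_set_lu_gp_id() above: nonzero tg_pt_gp_id values must be
 * unique per device, while zero requests the next free generated ID.
 */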
1756 int core_alua_set_tg_pt_gp_id(
1757         struct t10_alua_tg_pt_gp *tg_pt_gp,
1758         u16 tg_pt_gp_id)
1759 {
1760         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1761         struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
1762         u16 tg_pt_gp_id_tmp;
1763
1764         /*
1765          * The tg_pt_gp->tg_pt_gp_id may only be set once..
1766          */
1767         if (tg_pt_gp->tg_pt_gp_valid_id) {
1768                 pr_warn("ALUA TG PT Group already has a valid ID,"
1769                         " ignoring request\n");
1770                 return -EINVAL;
1771         }
1772
1773         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1774         if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
1775                 pr_err("Maximum ALUA alua_tg_pt_gps_count:"
1776                         " 0x0000ffff reached\n");
1777                 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1778                 kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1779                 return -ENOSPC;
1780         }
1781 again:
1782         tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
1783                         dev->t10_alua.alua_tg_pt_gps_counter++;
1784
1785         list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
1786                         tg_pt_gp_list) {
1787                 if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
1788                         if (!tg_pt_gp_id)
1789                                 goto again;
1790
1791                         pr_err("ALUA Target Port Group ID: %hu already"
1792                                 " exists, ignoring request\n", tg_pt_gp_id);
1793                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1794                         return -EINVAL;
1795                 }
1796         }
1797
1798         tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
1799         tg_pt_gp->tg_pt_gp_valid_id = 1;
1800         list_add_tail(&tg_pt_gp->tg_pt_gp_list,
1801                         &dev->t10_alua.tg_pt_gps_list);
1802         dev->t10_alua.alua_tg_pt_gps_count++;
1803         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1804
1805         return 0;
1806 }
1807
1808 struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
1809         struct se_port *port)
1810 {
1811         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1812
1813         tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
1814                                 GFP_KERNEL);
1815         if (!tg_pt_gp_mem) {
1816                 pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
1817                 return ERR_PTR(-ENOMEM);
1818         }
1819         INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1820         spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1821         atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
1822
1823         tg_pt_gp_mem->tg_pt = port;
1824         port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
1825
1826         return tg_pt_gp_mem;
1827 }
1828
1829 void core_alua_free_tg_pt_gp(
1830         struct t10_alua_tg_pt_gp *tg_pt_gp)
1831 {
1832         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1833         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
1834
1835         /*
1836          * Once we have reached this point, config_item_put() has already
1837          * been called from target_core_alua_drop_tg_pt_gp().
1838          *
1839          * Here we remove *tg_pt_gp from the global list so that
1840          * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
1841          * can be made while we are releasing struct t10_alua_tg_pt_gp.
1842          */
1843         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1844         list_del(&tg_pt_gp->tg_pt_gp_list);
1845         dev->t10_alua.alua_tg_pt_gps_count--;
1846         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1847
1848         flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
1849
1850         /*
1851          * Allow a struct t10_alua_tg_pt_gp_member * referenced by
1852          * core_alua_get_tg_pt_gp_by_name() in
1853          * target_core_configfs.c:target_core_store_alua_tg_pt_gp()
1854          * to be released with core_alua_put_tg_pt_gp_from_name().
1855          */
1856         while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
1857                 cpu_relax();
1858
1859         /*
1860          * Release reference to struct t10_alua_tg_pt_gp from all associated
1861          * struct se_port.
1862          */
1863         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1864         list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
1865                         &tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
1866                 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1867                         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1868                         tg_pt_gp->tg_pt_gp_members--;
1869                         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1870                 }
1871                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1872                 /*
1873                  * tg_pt_gp_mem is associated with a single
1874                  * se_port->sep_alua_tg_pt_gp_mem, and is released via
1875                  * core_alua_free_tg_pt_gp_mem().
1876                  *
1877                  * If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
1878                  * assume we want to re-associate a given tg_pt_gp_mem with
1879                  * default_tg_pt_gp.
1880                  */
1881                 spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1882                 if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
1883                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
1884                                         dev->t10_alua.default_tg_pt_gp);
1885                 } else
1886                         tg_pt_gp_mem->tg_pt_gp = NULL;
1887                 spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1888
1889                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1890         }
1891         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1892
1893         kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
1894 }
1895
1896 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
1897 {
1898         struct t10_alua_tg_pt_gp *tg_pt_gp;
1899         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1900
1901         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1902         if (!tg_pt_gp_mem)
1903                 return;
1904
1905         while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
1906                 cpu_relax();
1907
1908         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1909         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
1910         if (tg_pt_gp) {
1911                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1912                 if (tg_pt_gp_mem->tg_pt_gp_assoc) {
1913                         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1914                         tg_pt_gp->tg_pt_gp_members--;
1915                         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1916                 }
1917                 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1918                 tg_pt_gp_mem->tg_pt_gp = NULL;
1919         }
1920         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
1921
1922         kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
1923 }
1924
1925 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
1926                 struct se_device *dev, const char *name)
1927 {
1928         struct t10_alua_tg_pt_gp *tg_pt_gp;
1929         struct config_item *ci;
1930
1931         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1932         list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
1933                         tg_pt_gp_list) {
1934                 if (!tg_pt_gp->tg_pt_gp_valid_id)
1935                         continue;
1936                 ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
1937                 if (!strcmp(config_item_name(ci), name)) {
1938                         atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1939                         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1940                         return tg_pt_gp;
1941                 }
1942         }
1943         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1944
1945         return NULL;
1946 }
1947
1948 static void core_alua_put_tg_pt_gp_from_name(
1949         struct t10_alua_tg_pt_gp *tg_pt_gp)
1950 {
1951         struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
1952
1953         spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1954         atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1955         spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1956 }
1957
1958 /*
1959  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1960  */
1961 void __core_alua_attach_tg_pt_gp_mem(
1962         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1963         struct t10_alua_tg_pt_gp *tg_pt_gp)
1964 {
1965         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1966         tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
1967         tg_pt_gp_mem->tg_pt_gp_assoc = 1;
1968         list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
1969                         &tg_pt_gp->tg_pt_gp_mem_list);
1970         tg_pt_gp->tg_pt_gp_members++;
1971         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1972 }
1973
1974 /*
1975  * Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
1976  */
1977 static void __core_alua_drop_tg_pt_gp_mem(
1978         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
1979         struct t10_alua_tg_pt_gp *tg_pt_gp)
1980 {
1981         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1982         list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
1983         tg_pt_gp_mem->tg_pt_gp = NULL;
1984         tg_pt_gp_mem->tg_pt_gp_assoc = 0;
1985         tg_pt_gp->tg_pt_gp_members--;
1986         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1987 }
1988
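/*
 * configfs show handler for a LUN's alua_tg_pt_gp attribute.  The output
 * looks roughly like the following (alias and ID values are illustrative):
 *
 *   TG Port Alias: some_group
 *   TG Port Group ID: 1
 *   TG Port Primary Access State: Active/Optimized
 *   TG Port Primary Access Status: None
 *   TG Port Secondary Access State: None
 *   TG Port Secondary Access Status: None
 */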
1989 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
1990 {
1991         struct config_item *tg_pt_ci;
1992         struct t10_alua_tg_pt_gp *tg_pt_gp;
1993         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
1994         ssize_t len = 0;
1995
1996         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
1997         if (!tg_pt_gp_mem)
1998                 return len;
1999
2000         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2001         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2002         if (tg_pt_gp) {
2003                 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
2004                 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
2005                         " %hu\nTG Port Primary Access State: %s\nTG Port "
2006                         "Primary Access Status: %s\nTG Port Secondary Access"
2007                         " State: %s\nTG Port Secondary Access Status: %s\n",
2008                         config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
2009                         core_alua_dump_state(atomic_read(
2010                                         &tg_pt_gp->tg_pt_gp_alua_access_state)),
2011                         core_alua_dump_status(
2012                                 tg_pt_gp->tg_pt_gp_alua_access_status),
2013                         (atomic_read(&port->sep_tg_pt_secondary_offline)) ?
2014                         "Offline" : "None",
2015                         core_alua_dump_status(port->sep_tg_pt_secondary_stat));
2016         }
2017         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2018
2019         return len;
2020 }
2021
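/*
 * configfs store handler for the same attribute.  Writing the name of an
 * existing group moves the port into it, roughly (paths abridged):
 *
 *   echo some_group > .../tpgt_1/lun/lun_0/alua_tg_pt_gp
 *
 * while writing "NULL" drops the port back to default_tg_pt_gp.
 */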
2022 ssize_t core_alua_store_tg_pt_gp_info(
2023         struct se_port *port,
2024         const char *page,
2025         size_t count)
2026 {
2027         struct se_portal_group *tpg;
2028         struct se_lun *lun;
2029         struct se_device *dev = port->sep_lun->lun_se_dev;
2030         struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
2031         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2032         unsigned char buf[TG_PT_GROUP_NAME_BUF];
2033         int move = 0;
2034
2035         tpg = port->sep_tpg;
2036         lun = port->sep_lun;
2037
2038         tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
2039         if (!tg_pt_gp_mem)
2040                 return 0;
2041
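        /* Leave room for the NUL terminator that strstrip() below relies on */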
2042         if (count >= TG_PT_GROUP_NAME_BUF) {
2043                 pr_err("ALUA Target Port Group alias too large!\n");
2044                 return -EINVAL;
2045         }
2046         memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2047         memcpy(buf, page, count);
2048         /*
2049          * Any ALUA target port group alias besides "NULL" means we will be
2050          * making a new group association.
2051          */
2052         if (strcmp(strstrip(buf), "NULL")) {
2053                 /*
2054                  * core_alua_get_tg_pt_gp_by_name() will increment reference to
2055                  * struct t10_alua_tg_pt_gp.  This reference is released with
2056                  * core_alua_put_tg_pt_gp_from_name() below.
2057                  */
2058                 tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
2059                                         strstrip(buf));
2060                 if (!tg_pt_gp_new)
2061                         return -ENODEV;
2062         }
2063
2064         spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2065         tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
2066         if (tg_pt_gp) {
2067                 /*
2068                  * Clearing an existing tg_pt_gp association, and replacing
2069                  * with the default_tg_pt_gp.
2070                  */
2071                 if (!tg_pt_gp_new) {
2072                         pr_debug("Target_Core_ConfigFS: Moving"
2073                                 " %s/tpgt_%hu/%s from ALUA Target Port Group:"
2074                                 " alua/%s, ID: %hu back to"
2075                                 " default_tg_pt_gp\n",
2076                                 tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2077                                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2078                                 config_item_name(&lun->lun_group.cg_item),
2079                                 config_item_name(
2080                                         &tg_pt_gp->tg_pt_gp_group.cg_item),
2081                                 tg_pt_gp->tg_pt_gp_id);
2082
2083                         __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2084                         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
2085                                         dev->t10_alua.default_tg_pt_gp);
2086                         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2087
2088                         return count;
2089                 }
2090                 /*
2091                  * Removing existing association of tg_pt_gp_mem with tg_pt_gp
2092                  */
2093                 __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
2094                 move = 1;
2095         }
2096         /*
2097          * Associate tg_pt_gp_mem with tg_pt_gp_new.
2098          */
2099         __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
2100         spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
2101         pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
2102                 " Target Port Group: alua/%s, ID: %hu\n", (move) ?
2103                 "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2104                 tpg->se_tpg_tfo->tpg_get_tag(tpg),
2105                 config_item_name(&lun->lun_group.cg_item),
2106                 config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
2107                 tg_pt_gp_new->tg_pt_gp_id);
2108
2109         core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
2110         return count;
2111 }
2112
2113 ssize_t core_alua_show_access_type(
2114         struct t10_alua_tg_pt_gp *tg_pt_gp,
2115         char *page)
2116 {
2117         if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA) &&
2118             (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA))
2119                 return sprintf(page, "Implicit and Explicit\n");
2120         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)
2121                 return sprintf(page, "Implicit\n");
2122         else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)
2123                 return sprintf(page, "Explicit\n");
2124         else
2125                 return sprintf(page, "None\n");
2126 }
2127
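/*
 * configfs store for alua_access_type.  Accepted values mirror the show
 * function above: 0 = None, 1 = Implicit, 2 = Explicit, and
 * 3 = Implicit and Explicit.
 */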
2128 ssize_t core_alua_store_access_type(
2129         struct t10_alua_tg_pt_gp *tg_pt_gp,
2130         const char *page,
2131         size_t count)
2132 {
2133         unsigned long tmp;
2134         int ret;
2135
2136         ret = kstrtoul(page, 0, &tmp);
2137         if (ret < 0) {
2138                 pr_err("Unable to extract alua_access_type\n");
2139                 return ret;
2140         }
2141         if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
2142                 pr_err("Illegal value for alua_access_type:"
2143                                 " %lu\n", tmp);
2144                 return -EINVAL;
2145         }
2146         if (tmp == 3)
2147                 tg_pt_gp->tg_pt_gp_alua_access_type =
2148                         TPGS_IMPLICIT_ALUA | TPGS_EXPLICIT_ALUA;
2149         else if (tmp == 2)
2150                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICIT_ALUA;
2151         else if (tmp == 1)
2152                 tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICIT_ALUA;
2153         else
2154                 tg_pt_gp->tg_pt_gp_alua_access_type = 0;
2155
2156         return count;
2157 }
2158
2159 ssize_t core_alua_show_nonop_delay_msecs(
2160         struct t10_alua_tg_pt_gp *tg_pt_gp,
2161         char *page)
2162 {
2163         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
2164 }
2165
2166 ssize_t core_alua_store_nonop_delay_msecs(
2167         struct t10_alua_tg_pt_gp *tg_pt_gp,
2168         const char *page,
2169         size_t count)
2170 {
2171         unsigned long tmp;
2172         int ret;
2173
2174         ret = kstrtoul(page, 0, &tmp);
2175         if (ret < 0) {
2176                 pr_err("Unable to extract nonop_delay_msecs\n");
2177                 return ret;
2178         }
2179         if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
2180                 pr_err("Passed nonop_delay_msecs: %lu, exceeds"
2181                         " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
2182                         ALUA_MAX_NONOP_DELAY_MSECS);
2183                 return -EINVAL;
2184         }
2185         tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
2186
2187         return count;
2188 }
2189
2190 ssize_t core_alua_show_trans_delay_msecs(
2191         struct t10_alua_tg_pt_gp *tg_pt_gp,
2192         char *page)
2193 {
2194         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
2195 }
2196
2197 ssize_t core_alua_store_trans_delay_msecs(
2198         struct t10_alua_tg_pt_gp *tg_pt_gp,
2199         const char *page,
2200         size_t count)
2201 {
2202         unsigned long tmp;
2203         int ret;
2204
2205         ret = kstrtoul(page, 0, &tmp);
2206         if (ret < 0) {
2207                 pr_err("Unable to extract trans_delay_msecs\n");
2208                 return ret;
2209         }
2210         if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
2211                 pr_err("Passed trans_delay_msecs: %lu, exceeds"
2212                         " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
2213                         ALUA_MAX_TRANS_DELAY_MSECS);
2214                 return -EINVAL;
2215         }
2216         tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
2217
2218         return count;
2219 }
2220
2221 ssize_t core_alua_show_implicit_trans_secs(
2222         struct t10_alua_tg_pt_gp *tg_pt_gp,
2223         char *page)
2224 {
2225         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs);
2226 }
2227
2228 ssize_t core_alua_store_implicit_trans_secs(
2229         struct t10_alua_tg_pt_gp *tg_pt_gp,
2230         const char *page,
2231         size_t count)
2232 {
2233         unsigned long tmp;
2234         int ret;
2235
2236         ret = kstrtoul(page, 0, &tmp);
2237         if (ret < 0) {
2238                 pr_err("Unable to extract implicit_trans_secs\n");
2239                 return ret;
2240         }
2241         if (tmp > ALUA_MAX_IMPLICIT_TRANS_SECS) {
2242                 pr_err("Passed implicit_trans_secs: %lu, exceeds"
2243                         " ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp,
2244                         ALUA_MAX_IMPLICIT_TRANS_SECS);
2245                 return  -EINVAL;
2246         }
2247         tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int)tmp;
2248
2249         return count;
2250 }
2251
2252 ssize_t core_alua_show_preferred_bit(
2253         struct t10_alua_tg_pt_gp *tg_pt_gp,
2254         char *page)
2255 {
2256         return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
2257 }
2258
2259 ssize_t core_alua_store_preferred_bit(
2260         struct t10_alua_tg_pt_gp *tg_pt_gp,
2261         const char *page,
2262         size_t count)
2263 {
2264         unsigned long tmp;
2265         int ret;
2266
2267         ret = kstrtoul(page, 0, &tmp);
2268         if (ret < 0) {
2269                 pr_err("Unable to extract preferred ALUA value\n");
2270                 return ret;
2271         }
2272         if ((tmp != 0) && (tmp != 1)) {
2273                 pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
2274                 return -EINVAL;
2275         }
2276         tg_pt_gp->tg_pt_gp_pref = (int)tmp;
2277
2278         return count;
2279 }
2280
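/*
 * show/store handlers for a LUN's alua_tg_pt_offline attribute.  Writing
 * 1 or 0 drives an implicit (explicit == 0) secondary state transition
 * through core_alua_set_tg_pt_secondary_state(), e.g.:
 *
 *   echo 1 > .../tpgt_1/lun/lun_0/alua_tg_pt_offline
 */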
2281 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
2282 {
2283         if (!lun->lun_sep)
2284                 return -ENODEV;
2285
2286         return sprintf(page, "%d\n",
2287                 atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
2288 }
2289
2290 ssize_t core_alua_store_offline_bit(
2291         struct se_lun *lun,
2292         const char *page,
2293         size_t count)
2294 {
2295         struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
2296         unsigned long tmp;
2297         int ret;
2298
2299         if (!lun->lun_sep)
2300                 return -ENODEV;
2301
2302         ret = kstrtoul(page, 0, &tmp);
2303         if (ret < 0) {
2304                 pr_err("Unable to extract alua_tg_pt_offline value\n");
2305                 return ret;
2306         }
2307         if ((tmp != 0) && (tmp != 1)) {
2308                 pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
2309                                 tmp);
2310                 return -EINVAL;
2311         }
2312         tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
2313         if (!tg_pt_gp_mem) {
2314                 pr_err("Unable to locate *tg_pt_gp_mem\n");
2315                 return -EINVAL;
2316         }
2317
2318         ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
2319                         lun->lun_sep, 0, (int)tmp);
2320         if (ret < 0)
2321                 return -EINVAL;
2322
2323         return count;
2324 }
2325
2326 ssize_t core_alua_show_secondary_status(
2327         struct se_lun *lun,
2328         char *page)
2329 {
        if (!lun->lun_sep)
                return -ENODEV;

2330         return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
2331 }
2332
2333 ssize_t core_alua_store_secondary_status(
2334         struct se_lun *lun,
2335         const char *page,
2336         size_t count)
2337 {
2338         unsigned long tmp;
2339         int ret;
2340
        if (!lun->lun_sep)
                return -ENODEV;

2341         ret = kstrtoul(page, 0, &tmp);
2342         if (ret < 0) {
2343                 pr_err("Unable to extract alua_tg_pt_status\n");
2344                 return ret;
2345         }
2346         if ((tmp != ALUA_STATUS_NONE) &&
2347             (tmp != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2348             (tmp != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2349                 pr_err("Illegal value for alua_tg_pt_status: %lu\n",
2350                                 tmp);
2351                 return -EINVAL;
2352         }
2353         lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
2354
2355         return count;
2356 }
2357
2358 ssize_t core_alua_show_secondary_write_metadata(
2359         struct se_lun *lun,
2360         char *page)
2361 {
        if (!lun->lun_sep)
                return -ENODEV;

2362         return sprintf(page, "%d\n",
2363                         lun->lun_sep->sep_tg_pt_secondary_write_md);
2364 }
2365
2366 ssize_t core_alua_store_secondary_write_metadata(
2367         struct se_lun *lun,
2368         const char *page,
2369         size_t count)
2370 {
2371         unsigned long tmp;
2372         int ret;
2373
        if (!lun->lun_sep)
                return -ENODEV;

2374         ret = kstrtoul(page, 0, &tmp);
2375         if (ret < 0) {
2376                 pr_err("Unable to extract alua_tg_pt_write_md\n");
2377                 return ret;
2378         }
2379         if ((tmp != 0) && (tmp != 1)) {
2380                 pr_err("Illegal value for alua_tg_pt_write_md:"
2381                                 " %lu\n", tmp);
2382                 return -EINVAL;
2383         }
2384         lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
2385
2386         return count;
2387 }
2388
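/*
 * Called during se_device setup.  Every backend except physical HBA
 * passthrough (pSCSI) and internal-use HBAs gets a lu_gp_member
 * allocated and attached to the default ALUA LU group here.
 */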
2389 int core_setup_alua(struct se_device *dev)
2390 {
2391         if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
2392             !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
2393                 struct t10_alua_lu_gp_member *lu_gp_mem;
2394
2395                 /*
2396                  * Associate this struct se_device with the default ALUA
2397                  * LUN Group.
2398                  */
2399                 lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
2400                 if (IS_ERR(lu_gp_mem))
2401                         return PTR_ERR(lu_gp_mem);
2402
2403                 spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2404                 __core_alua_attach_lu_gp_mem(lu_gp_mem,
2405                                 default_lu_gp);
2406                 spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2407
2408                 pr_debug("%s: Adding to default ALUA LU Group:"
2409                         " core/alua/lu_gps/default_lu_gp\n",
2410                         dev->transport->name);
2411         }
2412
2413         return 0;
2414 }