Print this page
XXXX introduce drv_sectohz
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/qlc/ql_api.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright 2010 QLogic Corporation */
23 23
24 24 /*
25 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 26 */
27 27 /*
28 28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 29 */
30 30
31 31 #pragma ident "Copyright 2010 QLogic Corporation; ql_api.c"
32 32
33 33 /*
34 34 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
35 35 *
36 36 * ***********************************************************************
37 37 * * **
38 38 * * NOTICE **
39 39 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
40 40 * * ALL RIGHTS RESERVED **
41 41 * * **
42 42 * ***********************************************************************
43 43 *
44 44 */
45 45
46 46 #include <ql_apps.h>
47 47 #include <ql_api.h>
48 48 #include <ql_debug.h>
49 49 #include <ql_init.h>
50 50 #include <ql_iocb.h>
51 51 #include <ql_ioctl.h>
52 52 #include <ql_isr.h>
53 53 #include <ql_mbx.h>
54 54 #include <ql_nx.h>
55 55 #include <ql_xioctl.h>
56 56
57 57 /*
58 58 * Solaris external defines.
59 59 */
60 60 extern pri_t minclsyspri;
61 61 extern pri_t maxclsyspri;
62 62
63 63 /*
64 64 * dev_ops functions prototypes
65 65 */
66 66 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
67 67 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
68 68 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
69 69 static int ql_power(dev_info_t *, int, int);
70 70 static int ql_quiesce(dev_info_t *);
71 71
72 72 /*
73 73 * FCA functions prototypes exported by means of the transport table
74 74 */
75 75 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
76 76 fc_fca_bind_info_t *);
77 77 static void ql_unbind_port(opaque_t);
78 78 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
79 79 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
80 80 static int ql_els_send(opaque_t, fc_packet_t *);
81 81 static int ql_get_cap(opaque_t, char *, void *);
82 82 static int ql_set_cap(opaque_t, char *, void *);
83 83 static int ql_getmap(opaque_t, fc_lilpmap_t *);
84 84 static int ql_transport(opaque_t, fc_packet_t *);
85 85 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
86 86 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
87 87 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
88 88 static int ql_abort(opaque_t, fc_packet_t *, int);
89 89 static int ql_reset(opaque_t, uint32_t);
90 90 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
91 91 static opaque_t ql_get_device(opaque_t, fc_portid_t);
92 92
93 93 /*
94 94 * FCA Driver Support Function Prototypes.
95 95 */
96 96 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
97 97 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
98 98 ql_srb_t *);
99 99 static void ql_task_daemon(void *);
100 100 static void ql_task_thread(ql_adapter_state_t *);
101 101 static void ql_unsol_callback(ql_srb_t *);
102 102 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
103 103 fc_unsol_buf_t *);
104 104 static void ql_timer(void *);
105 105 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
106 106 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
107 107 uint32_t *, uint32_t *);
108 108 static void ql_halt(ql_adapter_state_t *, int);
109 109 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
110 110 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
111 111 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
112 112 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
113 113 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
114 114 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
115 115 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
116 116 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
117 117 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
118 118 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
119 119 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
120 120 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
121 121 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
122 122 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
123 123 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
124 124 static int ql_login_port(ql_adapter_state_t *, port_id_t);
125 125 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
126 126 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
127 127 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
128 128 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
129 129 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
130 130 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
131 131 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
132 132 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
133 133 ql_srb_t *);
134 134 static int ql_kstat_update(kstat_t *, int);
135 135 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
136 136 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
137 137 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
138 138 static void ql_rst_aen(ql_adapter_state_t *);
139 139 static void ql_restart_queues(ql_adapter_state_t *);
140 140 static void ql_abort_queues(ql_adapter_state_t *);
141 141 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
142 142 static void ql_idle_check(ql_adapter_state_t *);
143 143 static int ql_loop_resync(ql_adapter_state_t *);
144 144 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
145 145 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
146 146 static int ql_save_config_regs(dev_info_t *);
147 147 static int ql_restore_config_regs(dev_info_t *);
148 148 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
149 149 static int ql_handle_rscn_update(ql_adapter_state_t *);
150 150 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
151 151 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
152 152 static int ql_dump_firmware(ql_adapter_state_t *);
153 153 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
154 154 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
155 155 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
156 156 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
157 157 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
158 158 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
159 159 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
160 160 void *);
161 161 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
162 162 uint8_t);
163 163 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
164 164 static int ql_suspend_adapter(ql_adapter_state_t *);
165 165 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
166 166 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
167 167 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
168 168 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
169 169 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
170 170 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
171 171 static int ql_setup_interrupts(ql_adapter_state_t *);
172 172 static int ql_setup_msi(ql_adapter_state_t *);
173 173 static int ql_setup_msix(ql_adapter_state_t *);
174 174 static int ql_setup_fixed(ql_adapter_state_t *);
175 175 static void ql_release_intr(ql_adapter_state_t *);
176 176 static void ql_disable_intr(ql_adapter_state_t *);
177 177 static int ql_legacy_intr(ql_adapter_state_t *);
178 178 static int ql_init_mutex(ql_adapter_state_t *);
179 179 static void ql_destroy_mutex(ql_adapter_state_t *);
180 180 static void ql_iidma(ql_adapter_state_t *);
181 181
182 182 static int ql_n_port_plogi(ql_adapter_state_t *);
183 183 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
184 184 els_descriptor_t *);
185 185 static void ql_isp_els_request_ctor(els_descriptor_t *,
186 186 els_passthru_entry_t *);
187 187 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
188 188 static int ql_wait_for_td_stop(ql_adapter_state_t *);
189 189 static void ql_process_idc_event(ql_adapter_state_t *);
190 190
191 191 /*
192 192 * Global data
193 193 */
194 194 static uint8_t ql_enable_pm = 1;
195 195 static int ql_flash_sbus_fpga = 0;
196 196 uint32_t ql_os_release_level;
197 197 uint32_t ql_disable_aif = 0;
198 198 uint32_t ql_disable_msi = 0;
199 199 uint32_t ql_disable_msix = 0;
200 200 uint32_t ql_enable_ets = 0;
201 201 uint16_t ql_osc_wait_count = 1000;
202 202
203 203 /* Timer routine variables. */
204 204 static timeout_id_t ql_timer_timeout_id = NULL;
205 205 static clock_t ql_timer_ticks;
206 206
207 207 /* Soft state head pointer. */
208 208 void *ql_state = NULL;
209 209
210 210 /* Head adapter link. */
211 211 ql_head_t ql_hba = {
212 212 NULL,
213 213 NULL
214 214 };
215 215
216 216 /* Global hba index */
217 217 uint32_t ql_gfru_hba_index = 1;
218 218
219 219 /*
220 220 * Some IP defines and globals
221 221 */
222 222 uint32_t ql_ip_buffer_count = 128;
223 223 uint32_t ql_ip_low_water = 10;
224 224 uint8_t ql_ip_fast_post_count = 5;
225 225 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
226 226
227 227 /* Device AL_PA to Device Head Queue index array. */
228 228 uint8_t ql_alpa_to_index[] = {
229 229 0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
230 230 0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
231 231 0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
232 232 0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
233 233 0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
234 234 0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
235 235 0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
236 236 0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
237 237 0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
238 238 0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
239 239 0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
240 240 0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
241 241 0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
242 242 0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
243 243 0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
244 244 0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
245 245 0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
246 246 0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
247 247 0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
248 248 0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
249 249 0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
250 250 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
251 251 0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
252 252 0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
253 253 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
254 254 0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
255 255 };
256 256
257 257 /* Device loop_id to ALPA array. */
258 258 static uint8_t ql_index_to_alpa[] = {
259 259 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
260 260 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
261 261 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
262 262 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
263 263 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
264 264 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
265 265 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
266 266 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
267 267 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
268 268 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
269 269 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
270 270 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
271 271 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
272 272 };
273 273
274 274 /* 2200 register offsets */
275 275 static reg_off_t reg_off_2200 = {
276 276 0x00, /* flash_address */
277 277 0x02, /* flash_data */
278 278 0x06, /* ctrl_status */
279 279 0x08, /* ictrl */
280 280 0x0a, /* istatus */
281 281 0x0c, /* semaphore */
282 282 0x0e, /* nvram */
283 283 0x18, /* req_in */
284 284 0x18, /* req_out */
285 285 0x1a, /* resp_in */
286 286 0x1a, /* resp_out */
287 287 0xff, /* risc2host - n/a */
288 288 24, /* Number of mailboxes */
289 289
290 290 /* Mailbox in register offsets 0 - 23 */
291 291 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
292 292 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
293 293 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
294 294 /* 2200 does not have mailbox 24-31 - n/a */
295 295 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
296 296
297 297 /* Mailbox out register offsets 0 - 23 */
298 298 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
299 299 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
300 300 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
301 301 /* 2200 does not have mailbox 24-31 - n/a */
302 302 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
303 303
304 304 0x96, /* fpm_diag_config */
305 305 0xa4, /* pcr */
306 306 0xb0, /* mctr */
307 307 0xb8, /* fb_cmd */
308 308 0xc0, /* hccr */
309 309 0xcc, /* gpiod */
310 310 0xce, /* gpioe */
311 311 0xff, /* host_to_host_sema - n/a */
312 312 0xff, /* pri_req_in - n/a */
313 313 0xff, /* pri_req_out - n/a */
314 314 0xff, /* atio_req_in - n/a */
315 315 0xff, /* atio_req_out - n/a */
316 316 0xff, /* io_base_addr - n/a */
317 317 0xff, /* nx_host_int - n/a */
318 318 0xff /* nx_risc_int - n/a */
319 319 };
320 320
321 321 /* 2300 register offsets */
322 322 static reg_off_t reg_off_2300 = {
323 323 0x00, /* flash_address */
324 324 0x02, /* flash_data */
325 325 0x06, /* ctrl_status */
326 326 0x08, /* ictrl */
327 327 0x0a, /* istatus */
328 328 0x0c, /* semaphore */
329 329 0x0e, /* nvram */
330 330 0x10, /* req_in */
331 331 0x12, /* req_out */
332 332 0x14, /* resp_in */
333 333 0x16, /* resp_out */
334 334 0x18, /* risc2host */
335 335 32, /* Number of mailboxes */
336 336
337 337 /* Mailbox in register offsets 0 - 31 */
338 338 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
339 339 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
340 340 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
341 341 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
342 342
343 343 /* Mailbox out register offsets 0 - 31 */
344 344 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
345 345 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
346 346 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
347 347 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
348 348
349 349 0x96, /* fpm_diag_config */
350 350 0xa4, /* pcr */
351 351 0xb0, /* mctr */
352 352 0x80, /* fb_cmd */
353 353 0xc0, /* hccr */
354 354 0xcc, /* gpiod */
355 355 0xce, /* gpioe */
356 356 0x1c, /* host_to_host_sema */
357 357 0xff, /* pri_req_in - n/a */
358 358 0xff, /* pri_req_out - n/a */
359 359 0xff, /* atio_req_in - n/a */
360 360 0xff, /* atio_req_out - n/a */
361 361 0xff, /* io_base_addr - n/a */
362 362 0xff, /* nx_host_int - n/a */
363 363 0xff /* nx_risc_int - n/a */
364 364 };
365 365
366 366 /* 2400/2500 register offsets */
367 367 reg_off_t reg_off_2400_2500 = {
368 368 0x00, /* flash_address */
369 369 0x04, /* flash_data */
370 370 0x08, /* ctrl_status */
371 371 0x0c, /* ictrl */
372 372 0x10, /* istatus */
373 373 0xff, /* semaphore - n/a */
374 374 0xff, /* nvram - n/a */
375 375 0x1c, /* req_in */
376 376 0x20, /* req_out */
377 377 0x24, /* resp_in */
378 378 0x28, /* resp_out */
379 379 0x44, /* risc2host */
380 380 32, /* Number of mailboxes */
381 381
382 382 /* Mailbox in register offsets 0 - 31 */
383 383 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
384 384 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
385 385 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
386 386 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
387 387
388 388 /* Mailbox out register offsets 0 - 31 */
389 389 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
390 390 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
391 391 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
392 392 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
393 393
394 394 0xff, /* fpm_diag_config - n/a */
395 395 0xff, /* pcr - n/a */
396 396 0xff, /* mctr - n/a */
397 397 0xff, /* fb_cmd - n/a */
398 398 0x48, /* hccr */
399 399 0x4c, /* gpiod */
400 400 0x50, /* gpioe */
401 401 0xff, /* host_to_host_sema - n/a */
402 402 0x2c, /* pri_req_in */
403 403 0x30, /* pri_req_out */
404 404 0x3c, /* atio_req_in */
405 405 0x40, /* atio_req_out */
406 406 0x54, /* io_base_addr */
407 407 0xff, /* nx_host_int - n/a */
408 408 0xff /* nx_risc_int - n/a */
409 409 };
410 410
411 411 /* P3 register offsets */
412 412 static reg_off_t reg_off_8021 = {
413 413 0x00, /* flash_address */
414 414 0x04, /* flash_data */
415 415 0x08, /* ctrl_status */
416 416 0x0c, /* ictrl */
417 417 0x10, /* istatus */
418 418 0xff, /* semaphore - n/a */
419 419 0xff, /* nvram - n/a */
420 420 0xff, /* req_in - n/a */
421 421 0x0, /* req_out */
422 422 0x100, /* resp_in */
423 423 0x200, /* resp_out */
424 424 0x500, /* risc2host */
425 425 32, /* Number of mailboxes */
426 426
427 427 /* Mailbox in register offsets 0 - 31 */
428 428 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
429 429 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
430 430 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
431 431 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
432 432
433 433 /* Mailbox out register offsets 0 - 31 */
434 434 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
435 435 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
436 436 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
437 437 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
438 438
439 439 0xff, /* fpm_diag_config - n/a */
440 440 0xff, /* pcr - n/a */
441 441 0xff, /* mctr - n/a */
442 442 0xff, /* fb_cmd - n/a */
443 443 0x48, /* hccr */
444 444 0x4c, /* gpiod */
445 445 0x50, /* gpioe */
446 446 0xff, /* host_to_host_sema - n/a */
447 447 0x2c, /* pri_req_in */
448 448 0x30, /* pri_req_out */
449 449 0x3c, /* atio_req_in */
450 450 0x40, /* atio_req_out */
451 451 0x54, /* io_base_addr */
452 452 0x380, /* nx_host_int */
453 453 0x504 /* nx_risc_int */
454 454 };
455 455
456 456 /* mutex for protecting variables shared by all instances of the driver */
457 457 kmutex_t ql_global_mutex;
458 458 kmutex_t ql_global_hw_mutex;
459 459 kmutex_t ql_global_el_mutex;
460 460
461 461 /* DMA access attribute structure. */
462 462 static ddi_device_acc_attr_t ql_dev_acc_attr = {
463 463 DDI_DEVICE_ATTR_V0,
464 464 DDI_STRUCTURE_LE_ACC,
465 465 DDI_STRICTORDER_ACC
466 466 };
467 467
468 468 /* I/O DMA attributes structures. */
469 469 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
470 470 DMA_ATTR_V0, /* dma_attr_version */
471 471 QL_DMA_LOW_ADDRESS, /* low DMA address range */
472 472 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
473 473 QL_DMA_XFER_COUNTER, /* DMA counter register */
474 474 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
475 475 QL_DMA_BURSTSIZES, /* DMA burstsizes */
476 476 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
477 477 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
478 478 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
479 479 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
480 480 QL_DMA_GRANULARITY, /* granularity of device */
481 481 QL_DMA_XFER_FLAGS /* DMA transfer flags */
482 482 };
483 483
484 484 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
485 485 DMA_ATTR_V0, /* dma_attr_version */
486 486 QL_DMA_LOW_ADDRESS, /* low DMA address range */
487 487 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */
488 488 QL_DMA_XFER_COUNTER, /* DMA counter register */
489 489 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
490 490 QL_DMA_BURSTSIZES, /* DMA burstsizes */
491 491 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
492 492 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
493 493 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
494 494 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
495 495 QL_DMA_GRANULARITY, /* granularity of device */
496 496 QL_DMA_XFER_FLAGS /* DMA transfer flags */
497 497 };
498 498
499 499 /* Load the default dma attributes */
500 500 static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
501 501 static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
502 502 static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
503 503 static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
504 504 static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
505 505 static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
506 506 static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
507 507 static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
508 508 static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
509 509 static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
510 510 static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
511 511 static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
512 512 static ddi_dma_attr_t ql_32fcp_data_dma_attr;
513 513 static ddi_dma_attr_t ql_64fcp_data_dma_attr;
514 514
515 515 /* Static declarations of cb_ops entry point functions... */
516 516 static struct cb_ops ql_cb_ops = {
517 517 ql_open, /* b/c open */
518 518 ql_close, /* b/c close */
519 519 nodev, /* b strategy */
520 520 nodev, /* b print */
521 521 nodev, /* b dump */
522 522 nodev, /* c read */
523 523 nodev, /* c write */
524 524 ql_ioctl, /* c ioctl */
525 525 nodev, /* c devmap */
526 526 nodev, /* c mmap */
527 527 nodev, /* c segmap */
528 528 nochpoll, /* c poll */
529 529 nodev, /* cb_prop_op */
530 530 NULL, /* streamtab */
531 531 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */
532 532 CB_REV, /* cb_ops revision */
533 533 nodev, /* c aread */
534 534 nodev /* c awrite */
535 535 };
536 536
537 537 /* Static declarations of dev_ops entry point functions... */
538 538 static struct dev_ops ql_devops = {
539 539 DEVO_REV, /* devo_rev */
540 540 0, /* refcnt */
541 541 ql_getinfo, /* getinfo */
542 542 nulldev, /* identify */
543 543 nulldev, /* probe */
544 544 ql_attach, /* attach */
545 545 ql_detach, /* detach */
546 546 nodev, /* reset */
547 547 &ql_cb_ops, /* char/block ops */
548 548 NULL, /* bus operations */
549 549 ql_power, /* power management */
550 550 ql_quiesce /* quiesce device */
551 551 };
552 552
553 553 /* ELS command code to text converter */
554 554 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
555 555 /* Mailbox command code to text converter */
556 556 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
557 557
558 558 char qlc_driver_version[] = QL_VERSION;
559 559
560 560 /*
561 561 * Loadable Driver Interface Structures.
562 562 * Declare and initialize the module configuration section...
563 563 */
564 564 static struct modldrv modldrv = {
565 565 &mod_driverops, /* type of module: driver */
566 566 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */
567 567 &ql_devops /* driver dev_ops */
568 568 };
569 569
570 570 static struct modlinkage modlinkage = {
571 571 MODREV_1,
572 572 &modldrv,
573 573 NULL
574 574 };
575 575
576 576 /* ************************************************************************ */
577 577 /* Loadable Module Routines. */
578 578 /* ************************************************************************ */
579 579
580 580 /*
581 581 * _init
582 582 * Initializes a loadable module. It is called before any other
583 583 * routine in a loadable module.
584 584 *
585 585 * Returns:
586 586 * 0 = success
587 587 *
588 588 * Context:
589 589 * Kernel context.
590 590 */
591 591 int
592 592 _init(void)
593 593 {
594 594 uint16_t w16;
595 595 int rval = 0;
596 596
597 597 /* Get OS major release level. */
598 598 for (w16 = 0; w16 < sizeof (utsname.release); w16++) {
599 599 if (utsname.release[w16] == '.') {
600 600 w16++;
601 601 break;
602 602 }
603 603 }
604 604 if (w16 < sizeof (utsname.release)) {
605 605 (void) ql_bstr_to_dec(&utsname.release[w16],
606 606 &ql_os_release_level, 0);
607 607 } else {
608 608 ql_os_release_level = 0;
609 609 }
610 610 if (ql_os_release_level < 6) {
611 611 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
612 612 QL_NAME, ql_os_release_level);
613 613 rval = EINVAL;
614 614 }
615 615 if (ql_os_release_level == 6) {
616 616 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
617 617 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
618 618 }
619 619
620 620 if (rval == 0) {
621 621 rval = ddi_soft_state_init(&ql_state,
622 622 sizeof (ql_adapter_state_t), 0);
623 623 }
624 624 if (rval == 0) {
625 625 /* allow the FC Transport to tweak the dev_ops */
626 626 fc_fca_init(&ql_devops);
627 627
628 628 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
629 629 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
630 630 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
631 631 rval = mod_install(&modlinkage);
632 632 if (rval != 0) {
633 633 mutex_destroy(&ql_global_hw_mutex);
634 634 mutex_destroy(&ql_global_mutex);
635 635 mutex_destroy(&ql_global_el_mutex);
636 636 ddi_soft_state_fini(&ql_state);
637 637 } else {
638 638 /*EMPTY*/
639 639 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
640 640 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
641 641 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
642 642 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
643 643 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
644 644 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
645 645 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
646 646 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
647 647 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
648 648 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
649 649 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
650 650 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
651 651 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
652 652 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
653 653 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
654 654 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
655 655 QL_FCSM_CMD_SGLLEN;
656 656 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
657 657 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
658 658 QL_FCSM_RSP_SGLLEN;
659 659 ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
660 660 ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
661 661 QL_FCIP_CMD_SGLLEN;
662 662 ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
663 663 ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
664 664 QL_FCIP_RSP_SGLLEN;
665 665 ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
666 666 ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
667 667 QL_FCP_CMD_SGLLEN;
668 668 ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
669 669 ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
670 670 QL_FCP_RSP_SGLLEN;
671 671 }
672 672 }
673 673
674 674 if (rval != 0) {
675 675 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
676 676 QL_NAME);
677 677 }
678 678
679 679 return (rval);
680 680 }
681 681
682 682 /*
683 683 * _fini
684 684 * Prepares a module for unloading. It is called when the system
685 685 * wants to unload a module. If the module determines that it can
686 686 * be unloaded, then _fini() returns the value returned by
687 687 * mod_remove(). Upon successful return from _fini() no other
688 688 * routine in the module will be called before _init() is called.
689 689 *
690 690 * Returns:
691 691 * 0 = success
692 692 *
693 693 * Context:
694 694 * Kernel context.
695 695 */
696 696 int
697 697 _fini(void)
698 698 {
699 699 int rval;
700 700
701 701 rval = mod_remove(&modlinkage);
702 702 if (rval == 0) {
703 703 mutex_destroy(&ql_global_hw_mutex);
704 704 mutex_destroy(&ql_global_mutex);
705 705 mutex_destroy(&ql_global_el_mutex);
706 706 ddi_soft_state_fini(&ql_state);
707 707 }
708 708
709 709 return (rval);
710 710 }
711 711
/*
 * _info
 *	Returns information about this loadable module by delegating
 *	to mod_info() with the module's linkage structure.
 *
 * Input:
 *	modinfop = pointer to module information structure.
 *
 * Returns:
 *	Value returned by mod_info().
 *
 * Context:
 *	Kernel context.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
730 730
731 731 /* ************************************************************************ */
732 732 /* dev_ops functions */
733 733 /* ************************************************************************ */
734 734
735 735 /*
736 736 * ql_getinfo
737 737 * Returns the pointer associated with arg when cmd is
738 738 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
739 739 * instance number associated with arg when cmd is set
740 740 * to DDI_INFO_DEV2INSTANCE.
741 741 *
742 742 * Input:
743 743 * dip = Do not use.
744 744 * cmd = command argument.
745 745 * arg = command specific argument.
746 746 * resultp = pointer to where request information is stored.
747 747 *
748 748 * Returns:
749 749 * DDI_SUCCESS or DDI_FAILURE.
750 750 *
751 751 * Context:
752 752 * Kernel context.
753 753 */
754 754 /* ARGSUSED */
755 755 static int
756 756 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
757 757 {
758 758 ql_adapter_state_t *ha;
759 759 int minor;
760 760 int rval = DDI_FAILURE;
761 761
762 762 minor = (int)(getminor((dev_t)arg));
763 763 ha = ddi_get_soft_state(ql_state, minor);
764 764 if (ha == NULL) {
765 765 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
766 766 getminor((dev_t)arg));
767 767 *resultp = NULL;
768 768 return (rval);
769 769 }
770 770
771 771 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
772 772
773 773 switch (cmd) {
774 774 case DDI_INFO_DEVT2DEVINFO:
775 775 *resultp = ha->dip;
776 776 rval = DDI_SUCCESS;
777 777 break;
778 778 case DDI_INFO_DEVT2INSTANCE:
779 779 *resultp = (void *)(uintptr_t)(ha->instance);
780 780 rval = DDI_SUCCESS;
781 781 break;
782 782 default:
783 783 EL(ha, "failed, unsupported cmd=%d\n", cmd);
784 784 rval = DDI_FAILURE;
785 785 break;
786 786 }
787 787
788 788 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
789 789
790 790 return (rval);
791 791 }
792 792
793 793 /*
794 794 * ql_attach
795 795 * Configure and attach an instance of the driver
796 796 * for a port.
797 797 *
798 798 * Input:
799 799 * dip = pointer to device information structure.
800 800 * cmd = attach type.
801 801 *
802 802 * Returns:
803 803 * DDI_SUCCESS or DDI_FAILURE.
804 804 *
805 805 * Context:
806 806 * Kernel context.
807 807 */
808 808 static int
809 809 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
810 810 {
811 811 off_t regsize;
812 812 uint32_t size;
813 813 int rval, *ptr;
814 814 int instance;
815 815 uint_t progress = 0;
816 816 char *buf;
817 817 ushort_t caps_ptr, cap;
818 818 fc_fca_tran_t *tran;
819 819 ql_adapter_state_t *ha = NULL;
820 820
821 821 static char *pmcomps[] = {
822 822 NULL,
823 823 PM_LEVEL_D3_STR, /* Device OFF */
824 824 PM_LEVEL_D0_STR, /* Device ON */
825 825 };
826 826
827 827 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
828 828 ddi_get_instance(dip), cmd);
829 829
830 830 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
831 831
	/*
	 * NOTE(review): "instance" is only assigned on the DDI_ATTACH path
	 * below; the DDI_RESUME and default paths pass it to cmn_err()
	 * uninitialized — confirm and initialize it before the switch.
	 */
832 832 switch (cmd) {
833 833 case DDI_ATTACH:
834 834 /* first get the instance */
835 835 instance = ddi_get_instance(dip);
836 836
837 837 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
838 838 QL_NAME, instance, QL_VERSION);
839 839
840 840 /* Correct OS version? */
841 841 if (ql_os_release_level != 11) {
842 842 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
843 843 "11", QL_NAME, instance);
844 844 goto attach_failed;
845 845 }
846 846
847 847 /* Hardware is installed in a DMA-capable slot? */
848 848 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
849 849 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
850 850 instance);
851 851 goto attach_failed;
852 852 }
853 853
854 854 /* No support for high-level interrupts */
855 855 if (ddi_intr_hilevel(dip, 0) != 0) {
856 856 cmn_err(CE_WARN, "%s(%d): High level interrupt"
857 857 " not supported", QL_NAME, instance);
858 858 goto attach_failed;
859 859 }
860 860
861 861 /* Allocate our per-device-instance structure */
862 862 if (ddi_soft_state_zalloc(ql_state,
863 863 instance) != DDI_SUCCESS) {
864 864 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
865 865 QL_NAME, instance);
866 866 goto attach_failed;
867 867 }
868 868 progress |= QL_SOFT_STATE_ALLOCED;
869 869
870 870 ha = ddi_get_soft_state(ql_state, instance);
871 871 if (ha == NULL) {
872 872 cmn_err(CE_WARN, "%s(%d): can't get soft state",
873 873 QL_NAME, instance);
874 874 goto attach_failed;
875 875 }
876 876 ha->dip = dip;
877 877 ha->instance = instance;
878 878 ha->hba.base_address = ha;
879 879 ha->pha = ha;
880 880
881 881 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
882 882 cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
883 883 QL_NAME, instance);
884 884 goto attach_failed;
885 885 }
886 886
887 887 /* Get extended logging and dump flags. */
888 888 ql_common_properties(ha);
889 889
890 890 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
891 891 "sbus") == 0) {
892 892 EL(ha, "%s SBUS card detected", QL_NAME);
893 893 ha->cfg_flags |= CFG_SBUS_CARD;
894 894 }
895 895
896 896 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
897 897 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
898 898
899 899 ha->outstanding_cmds = kmem_zalloc(
900 900 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
901 901 KM_SLEEP);
902 902
903 903 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
904 904 QL_UB_LIMIT, KM_SLEEP);
905 905
906 906 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
907 907 KM_SLEEP);
908 908
909 909 (void) ddi_pathname(dip, buf);
910 910 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
911 911 if (ha->devpath == NULL) {
912 912 EL(ha, "devpath mem alloc failed\n");
913 913 } else {
914 914 (void) strcpy(ha->devpath, buf);
915 915 EL(ha, "devpath is: %s\n", ha->devpath);
916 916 }
917 917
918 918 if (CFG_IST(ha, CFG_SBUS_CARD)) {
919 919 /*
920 920 * For cards where PCI is mapped to sbus e.g. Ivory.
921 921 *
922 922 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
923 923 * : 0x100 - 0x3FF PCI IO space for 2200
924 924 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
925 925 * : 0x100 - 0x3FF PCI IO Space for fpga
926 926 */
927 927 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
928 928 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
929 929 DDI_SUCCESS) {
930 930 cmn_err(CE_WARN, "%s(%d): Unable to map device"
931 931 " registers", QL_NAME, instance);
932 932 goto attach_failed;
933 933 }
934 934 if (ddi_regs_map_setup(dip, 1,
935 935 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
936 936 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
937 937 DDI_SUCCESS) {
938 938 /* We should not fail attach here */
939 939 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
940 940 QL_NAME, instance);
941 941 ha->sbus_fpga_iobase = NULL;
942 942 }
943 943 progress |= QL_REGS_MAPPED;
944 944
945 945 /*
946 946 * We should map config space before adding interrupt
947 947 * So that the chip type (2200 or 2300) can be
948 948 * determined before the interrupt routine gets a
949 949 * chance to execute.
950 950 */
951 951 if (ddi_regs_map_setup(dip, 0,
952 952 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
953 953 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
954 954 DDI_SUCCESS) {
955 955 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
956 956 "config registers", QL_NAME, instance);
957 957 goto attach_failed;
958 958 }
959 959 progress |= QL_CONFIG_SPACE_SETUP;
960 960 } else {
961 961 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
962 962 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
963 963 DDI_PROP_DONTPASS, "reg", &ptr, &size);
964 964 if (rval != DDI_PROP_SUCCESS) {
965 965 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
966 966 "address registers", QL_NAME, instance);
967 967 goto attach_failed;
968 968 } else {
969 969 ha->pci_bus_addr = ptr[0];
970 970 ha->function_number = (uint8_t)
971 971 (ha->pci_bus_addr >> 8 & 7);
972 972 ddi_prop_free(ptr);
973 973 }
974 974
975 975 /*
976 976 * We should map config space before adding interrupt
977 977 * So that the chip type (2200 or 2300) can be
978 978 * determined before the interrupt routine gets a
979 979 * chance to execute.
980 980 */
981 981 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
982 982 DDI_SUCCESS) {
983 983 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
984 984 "config space", QL_NAME, instance);
985 985 goto attach_failed;
986 986 }
987 987 progress |= QL_CONFIG_SPACE_SETUP;
988 988
989 989 /*
990 990 * Setup the ISP2200 registers address mapping to be
991 991 * accessed by this particular driver.
992 992 * 0x0 Configuration Space
993 993 * 0x1 I/O Space
994 994 * 0x2 32-bit Memory Space address
995 995 * 0x3 64-bit Memory Space address
996 996 */
997 997 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
998 998 2 : 1;
999 999 if (ddi_dev_regsize(dip, size, &regsize) !=
1000 1000 DDI_SUCCESS ||
1001 1001 ddi_regs_map_setup(dip, size, &ha->iobase,
1002 1002 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
1003 1003 DDI_SUCCESS) {
1004 1004 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
1005 1005 "failed", QL_NAME, instance);
1006 1006 goto attach_failed;
1007 1007 }
1008 1008 progress |= QL_REGS_MAPPED;
1009 1009
1010 1010 /*
1011 1011 * We need I/O space mappings for 23xx HBAs for
1012 1012 * loading flash (FCode). The chip has a bug due to
1013 1013 * which loading flash fails through mem space
1014 1014 * mappings in PCI-X mode.
1015 1015 */
1016 1016 if (size == 1) {
1017 1017 ha->iomap_iobase = ha->iobase;
1018 1018 ha->iomap_dev_handle = ha->dev_handle;
1019 1019 } else {
1020 1020 if (ddi_dev_regsize(dip, 1, &regsize) !=
1021 1021 DDI_SUCCESS ||
1022 1022 ddi_regs_map_setup(dip, 1,
1023 1023 &ha->iomap_iobase, 0, regsize,
1024 1024 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1025 1025 DDI_SUCCESS) {
1026 1026 cmn_err(CE_WARN, "%s(%d): regs_map_"
1027 1027 "setup(I/O) failed", QL_NAME,
1028 1028 instance);
1029 1029 goto attach_failed;
1030 1030 }
1031 1031 progress |= QL_IOMAP_IOBASE_MAPPED;
1032 1032 }
1033 1033 }
1034 1034
1035 1035 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1036 1036 PCI_CONF_SUBSYSID);
1037 1037 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1038 1038 PCI_CONF_SUBVENID);
1039 1039 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1040 1040 PCI_CONF_VENID);
1041 1041 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1042 1042 PCI_CONF_DEVID);
1043 1043 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1044 1044 PCI_CONF_REVID);
1045 1045
1046 1046 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1047 1047 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1048 1048 ha->subven_id, ha->subsys_id);
1049 1049
1050 1050 switch (ha->device_id) {
1051 1051 case 0x2300:
1052 1052 case 0x2312:
1053 1053 case 0x2322:
1054 1054 case 0x6312:
1055 1055 case 0x6322:
1056 1056 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1057 1057 ha->flags |= FUNCTION_1;
1058 1058 }
1059 1059 if ((ha->device_id == 0x6322) ||
1060 1060 (ha->device_id == 0x2322)) {
1061 1061 ha->cfg_flags |= CFG_CTRL_6322;
1062 1062 ha->fw_class = 0x6322;
1063 1063 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1064 1064 } else {
1065 1065 ha->cfg_flags |= CFG_CTRL_2300;
1066 1066 ha->fw_class = 0x2300;
1067 1067 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1068 1068 }
1069 1069 ha->reg_off = &reg_off_2300;
1070 1070 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1071 1071 goto attach_failed;
1072 1072 }
1073 1073 ha->fcp_cmd = ql_command_iocb;
1074 1074 ha->ip_cmd = ql_ip_iocb;
1075 1075 ha->ms_cmd = ql_ms_iocb;
1076 1076 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1077 1077 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1078 1078 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1079 1079 } else {
1080 1080 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1081 1081 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1082 1082 }
1083 1083 break;
1084 1084
1085 1085 case 0x2200:
1086 1086 ha->cfg_flags |= CFG_CTRL_2200;
1087 1087 ha->reg_off = &reg_off_2200;
1088 1088 ha->fw_class = 0x2200;
1089 1089 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1090 1090 goto attach_failed;
1091 1091 }
1092 1092 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1093 1093 ha->fcp_cmd = ql_command_iocb;
1094 1094 ha->ip_cmd = ql_ip_iocb;
1095 1095 ha->ms_cmd = ql_ms_iocb;
1096 1096 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1097 1097 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1098 1098 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1099 1099 } else {
1100 1100 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1101 1101 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1102 1102 }
1103 1103 break;
1104 1104
1105 1105 case 0x2422:
1106 1106 case 0x2432:
1107 1107 case 0x5422:
1108 1108 case 0x5432:
1109 1109 case 0x8432:
1110 1110 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1111 1111 ha->flags |= FUNCTION_1;
1112 1112 }
1113 1113 ha->cfg_flags |= CFG_CTRL_2422;
1114 1114 if (ha->device_id == 0x8432) {
1115 1115 ha->cfg_flags |= CFG_CTRL_MENLO;
1116 1116 } else {
1117 1117 ha->flags |= VP_ENABLED;
1118 1118 }
1119 1119
1120 1120 ha->reg_off = &reg_off_2400_2500;
1121 1121 ha->fw_class = 0x2400;
1122 1122 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1123 1123 goto attach_failed;
1124 1124 }
1125 1125 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1126 1126 ha->fcp_cmd = ql_command_24xx_iocb;
1127 1127 ha->ip_cmd = ql_ip_24xx_iocb;
1128 1128 ha->ms_cmd = ql_ms_24xx_iocb;
1129 1129 ha->els_cmd = ql_els_24xx_iocb;
1130 1130 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1131 1131 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1132 1132 break;
1133 1133
1134 1134 case 0x2522:
1135 1135 case 0x2532:
1136 1136 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1137 1137 ha->flags |= FUNCTION_1;
1138 1138 }
1139 1139 ha->cfg_flags |= CFG_CTRL_25XX;
1140 1140 ha->flags |= VP_ENABLED;
1141 1141 ha->fw_class = 0x2500;
1142 1142 ha->reg_off = &reg_off_2400_2500;
1143 1143 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1144 1144 goto attach_failed;
1145 1145 }
1146 1146 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1147 1147 ha->fcp_cmd = ql_command_24xx_iocb;
1148 1148 ha->ip_cmd = ql_ip_24xx_iocb;
1149 1149 ha->ms_cmd = ql_ms_24xx_iocb;
1150 1150 ha->els_cmd = ql_els_24xx_iocb;
1151 1151 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1152 1152 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1153 1153 break;
1154 1154
1155 1155 case 0x8001:
1156 1156 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1157 1157 ha->flags |= FUNCTION_1;
1158 1158 }
1159 1159 ha->cfg_flags |= CFG_CTRL_81XX;
1160 1160 ha->flags |= VP_ENABLED;
1161 1161 ha->fw_class = 0x8100;
1162 1162 ha->reg_off = &reg_off_2400_2500;
1163 1163 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1164 1164 goto attach_failed;
1165 1165 }
1166 1166 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1167 1167 ha->fcp_cmd = ql_command_24xx_iocb;
1168 1168 ha->ip_cmd = ql_ip_24xx_iocb;
1169 1169 ha->ms_cmd = ql_ms_24xx_iocb;
1170 1170 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1171 1171 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1172 1172 break;
1173 1173
1174 1174 case 0x8021:
1175 1175 if (ha->function_number & BIT_0) {
1176 1176 ha->flags |= FUNCTION_1;
1177 1177 }
1178 1178 ha->cfg_flags |= CFG_CTRL_8021;
1179 1179 ha->reg_off = &reg_off_8021;
1180 1180 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1181 1181 ha->fcp_cmd = ql_command_24xx_iocb;
1182 1182 ha->ms_cmd = ql_ms_24xx_iocb;
1183 1183 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1184 1184 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1185 1185
1186 1186 ha->nx_pcibase = ha->iobase;
1187 1187 ha->iobase += 0xBC000 + (ha->function_number << 11);
1188 1188 ha->iomap_iobase += 0xBC000 +
1189 1189 (ha->function_number << 11);
1190 1190
1191 1191 /* map doorbell */
1192 1192 if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1193 1193 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1194 1194 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1195 1195 DDI_SUCCESS) {
1196 1196 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1197 1197 "(doorbell) failed", QL_NAME, instance);
1198 1198 goto attach_failed;
1199 1199 }
1200 1200 progress |= QL_DB_IOBASE_MAPPED;
1201 1201
1202 1202 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1203 1203 (ha->function_number << 12));
1204 1204 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1205 1205 (ha->function_number * 8);
1206 1206
1207 1207 ql_8021_update_crb_int_ptr(ha);
1208 1208 ql_8021_set_drv_active(ha);
1209 1209 break;
1210 1210
1211 1211 default:
1212 1212 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1213 1213 QL_NAME, instance, ha->device_id);
1214 1214 goto attach_failed;
1215 1215 }
1216 1216
1217 1217 /* Setup hba buffer. */
1218 1218
1219 1219 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1220 1220 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1221 1221 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1222 1222 RCVBUF_QUEUE_SIZE);
1223 1223
1224 1224 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1225 1225 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1226 1226 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1227 1227 "alloc failed", QL_NAME, instance);
1228 1228 goto attach_failed;
1229 1229 }
1230 1230 progress |= QL_HBA_BUFFER_SETUP;
1231 1231
1232 1232 /* Setup buffer pointers. */
1233 1233 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1234 1234 REQUEST_Q_BUFFER_OFFSET;
1235 1235 ha->request_ring_bp = (struct cmd_entry *)
1236 1236 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1237 1237
1238 1238 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1239 1239 RESPONSE_Q_BUFFER_OFFSET;
1240 1240 ha->response_ring_bp = (struct sts_entry *)
1241 1241 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1242 1242
1243 1243 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1244 1244 RCVBUF_Q_BUFFER_OFFSET;
1245 1245 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1246 1246 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1247 1247
1248 1248 /* Allocate resource for QLogic IOCTL */
1249 1249 (void) ql_alloc_xioctl_resource(ha);
1250 1250
1251 1251 /* Setup interrupts */
1252 1252 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1253 1253 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1254 1254 "rval=%xh", QL_NAME, instance, rval);
1255 1255 goto attach_failed;
1256 1256 }
1257 1257
1258 1258 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1259 1259
1260 1260 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1261 1261 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1262 1262 QL_NAME, instance);
1263 1263 goto attach_failed;
1264 1264 }
1265 1265
1266 1266 /*
1267 1267 * Allocate an N Port information structure
1268 1268 * for use when in P2P topology.
1269 1269 */
1270 1270 ha->n_port = (ql_n_port_info_t *)
1271 1271 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1272 1272 if (ha->n_port == NULL) {
1273 1273 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1274 1274 QL_NAME, instance);
1275 1275 goto attach_failed;
1276 1276 }
1277 1277
1278 1278 progress |= QL_N_PORT_INFO_CREATED;
1279 1279
1280 1280 /*
1281 1281 * Determine support for Power Management
1282 1282 */
1283 1283 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1284 1284
1285 1285 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1286 1286 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1287 1287 if (cap == PCI_CAP_ID_PM) {
1288 1288 ha->pm_capable = 1;
1289 1289 break;
1290 1290 }
1291 1291 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1292 1292 PCI_CAP_NEXT_PTR);
1293 1293 }
1294 1294
	/*
	 * Power management is further restricted to 2200-based HBAs and
	 * gated by the ql_enable_pm tunable (the three if-blocks below).
	 */
1295 1295 if (ha->pm_capable) {
1296 1296 /*
1297 1297 * Enable PM for 2200 based HBAs only.
1298 1298 */
1299 1299 if (ha->device_id != 0x2200) {
1300 1300 ha->pm_capable = 0;
1301 1301 }
1302 1302 }
1303 1303
1304 1304 if (ha->pm_capable) {
1305 1305 ha->pm_capable = ql_enable_pm;
1306 1306 }
1307 1307
1308 1308 if (ha->pm_capable) {
1309 1309 /*
1310 1310 * Initialize power management bookkeeping;
1311 1311 * components are created idle.
1312 1312 */
1313 1313 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1314 1314 pmcomps[0] = buf;
1315 1315
1316 1316 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1317 1317 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1318 1318 dip, "pm-components", pmcomps,
1319 1319 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1320 1320 DDI_PROP_SUCCESS) {
1321 1321 cmn_err(CE_WARN, "%s(%d): failed to create"
1322 1322 " pm-components property", QL_NAME,
1323 1323 instance);
1324 1324
1325 1325 /* Initialize adapter. */
1326 1326 ha->power_level = PM_LEVEL_D0;
1327 1327 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1328 1328 cmn_err(CE_WARN, "%s(%d): failed to"
1329 1329 " initialize adapter", QL_NAME,
1330 1330 instance);
1331 1331 goto attach_failed;
1332 1332 }
1333 1333 } else {
1334 1334 ha->power_level = PM_LEVEL_D3;
1335 1335 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1336 1336 PM_LEVEL_D0) != DDI_SUCCESS) {
1337 1337 cmn_err(CE_WARN, "%s(%d): failed to"
1338 1338 " raise power or initialize"
1339 1339 " adapter", QL_NAME, instance);
1340 1340 }
1341 1341 }
1342 1342 } else {
1343 1343 /* Initialize adapter. */
1344 1344 ha->power_level = PM_LEVEL_D0;
1345 1345 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1346 1346 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1347 1347 " adapter", QL_NAME, instance);
1348 1348 }
1349 1349 }
1350 1350
1351 1351 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1352 1352 ha->fw_subminor_version == 0) {
1353 1353 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1354 1354 QL_NAME, ha->instance);
1355 1355 } else {
1356 1356 int rval;
1357 1357 char ver_fmt[256];
1358 1358
1359 1359 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1360 1360 "Firmware version %d.%d.%d", ha->fw_major_version,
1361 1361 ha->fw_minor_version, ha->fw_subminor_version);
1362 1362
1363 1363 if (CFG_IST(ha, CFG_CTRL_81XX)) {
	/*
	 * NOTE(review): the size arguments below do not subtract the
	 * offset already written ("ver_fmt + rval" with size
	 * sizeof (ver_fmt)) — possible out-of-bounds write if the
	 * strings ever approach 256 bytes; should be
	 * sizeof (ver_fmt) - rval. Confirm and fix separately.
	 */
1364 1364 rval = (int)snprintf(ver_fmt + rval,
1365 1365 (size_t)sizeof (ver_fmt),
1366 1366 ", MPI fw version %d.%d.%d",
1367 1367 ha->mpi_fw_major_version,
1368 1368 ha->mpi_fw_minor_version,
1369 1369 ha->mpi_fw_subminor_version);
1370 1370
1371 1371 if (ha->subsys_id == 0x17B ||
1372 1372 ha->subsys_id == 0x17D) {
1373 1373 (void) snprintf(ver_fmt + rval,
1374 1374 (size_t)sizeof (ver_fmt),
1375 1375 ", PHY fw version %d.%d.%d",
1376 1376 ha->phy_fw_major_version,
1377 1377 ha->phy_fw_minor_version,
1378 1378 ha->phy_fw_subminor_version);
1379 1379 }
1380 1380 }
1381 1381 cmn_err(CE_NOTE, "!%s(%d): %s",
1382 1382 QL_NAME, ha->instance, ver_fmt);
1383 1383 }
1384 1384
1385 1385 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1386 1386 "controller", KSTAT_TYPE_RAW,
1387 1387 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1388 1388 if (ha->k_stats == NULL) {
1389 1389 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1390 1390 QL_NAME, instance);
1391 1391 goto attach_failed;
1392 1392 }
1393 1393 progress |= QL_KSTAT_CREATED;
1394 1394
1395 1395 ha->adapter_stats->version = 1;
1396 1396 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1397 1397 ha->k_stats->ks_private = ha;
1398 1398 ha->k_stats->ks_update = ql_kstat_update;
1399 1399 ha->k_stats->ks_ndata = 1;
1400 1400 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1401 1401 kstat_install(ha->k_stats);
1402 1402
1403 1403 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1404 1404 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1405 1405 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1406 1406 QL_NAME, instance);
1407 1407 goto attach_failed;
1408 1408 }
1409 1409 progress |= QL_MINOR_NODE_CREATED;
1410 1410
1411 1411 /* Allocate a transport structure for this instance */
1412 1412 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1413 1413 if (tran == NULL) {
1414 1414 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1415 1415 QL_NAME, instance);
1416 1416 goto attach_failed;
1417 1417 }
1418 1418
1419 1419 progress |= QL_FCA_TRAN_ALLOCED;
1420 1420
1421 1421 /* fill in the structure */
1422 1422 tran->fca_numports = 1;
1423 1423 tran->fca_version = FCTL_FCA_MODREV_5;
1424 1424 if (CFG_IST(ha, CFG_CTRL_2422)) {
1425 1425 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1426 1426 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1427 1427 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1428 1428 }
1429 1429 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1430 1430 tran->fca_perm_pwwn.raw_wwn, 8);
1431 1431
1432 1432 EL(ha, "FCA version %d\n", tran->fca_version);
1433 1433
1434 1434 /* Specify the amount of space needed in each packet */
1435 1435 tran->fca_pkt_size = sizeof (ql_srb_t);
1436 1436
1437 1437 /* command limits are usually dictated by hardware */
1438 1438 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1439 1439
1440 1440 /* dmaattr are static, set elsewhere. */
1441 1441 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1442 1442 tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1443 1443 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1444 1444 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1445 1445 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1446 1446 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1447 1447 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1448 1448 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1449 1449 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1450 1450 } else {
1451 1451 tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1452 1452 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1453 1453 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1454 1454 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1455 1455 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1456 1456 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1457 1457 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1458 1458 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1459 1459 }
1460 1460
1461 1461 tran->fca_acc_attr = &ql_dev_acc_attr;
1462 1462 tran->fca_iblock = &(ha->iblock_cookie);
1463 1463
1464 1464 /* the remaining values are simply function vectors */
1465 1465 tran->fca_bind_port = ql_bind_port;
1466 1466 tran->fca_unbind_port = ql_unbind_port;
1467 1467 tran->fca_init_pkt = ql_init_pkt;
1468 1468 tran->fca_un_init_pkt = ql_un_init_pkt;
1469 1469 tran->fca_els_send = ql_els_send;
1470 1470 tran->fca_get_cap = ql_get_cap;
1471 1471 tran->fca_set_cap = ql_set_cap;
1472 1472 tran->fca_getmap = ql_getmap;
1473 1473 tran->fca_transport = ql_transport;
1474 1474 tran->fca_ub_alloc = ql_ub_alloc;
1475 1475 tran->fca_ub_free = ql_ub_free;
1476 1476 tran->fca_ub_release = ql_ub_release;
1477 1477 tran->fca_abort = ql_abort;
1478 1478 tran->fca_reset = ql_reset;
1479 1479 tran->fca_port_manage = ql_port_manage;
1480 1480 tran->fca_get_device = ql_get_device;
1481 1481
1482 1482 /* give it to the FC transport */
1483 1483 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1484 1484 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1485 1485 instance);
1486 1486 goto attach_failed;
1487 1487 }
1488 1488 progress |= QL_FCA_ATTACH_DONE;
1489 1489
1490 1490 /* Stash the structure so it can be freed at detach */
↓ open down ↓ |
1490 lines elided |
↑ open up ↑ |
1491 1491 ha->tran = tran;
1492 1492
1493 1493 /* Acquire global state lock. */
1494 1494 GLOBAL_STATE_LOCK();
1495 1495
1496 1496 /* Add adapter structure to link list. */
1497 1497 ql_add_link_b(&ql_hba, &ha->hba);
1498 1498
1499 1499 /* Start one second driver timer. */
1500 1500 if (ql_timer_timeout_id == NULL) {
1501 - ql_timer_ticks = drv_usectohz(1000000);
1501 + ql_timer_ticks = drv_sectohz(1);
1502 1502 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1503 1503 ql_timer_ticks);
1504 1504 }
1505 1505
1506 1506 /* Release global state lock. */
1507 1507 GLOBAL_STATE_UNLOCK();
1508 1508
1509 1509 /* Determine and populate HBA fru info */
1510 1510 ql_setup_fruinfo(ha);
1511 1511
1512 1512 /* Setup task_daemon thread. */
1513 1513 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1514 1514 0, &p0, TS_RUN, minclsyspri);
1515 1515
1516 1516 progress |= QL_TASK_DAEMON_STARTED;
1517 1517
1518 1518 ddi_report_dev(dip);
1519 1519
1520 1520 /* Disable link reset in panic path */
1521 1521 ha->lip_on_panic = 1;
1522 1522
1523 1523 rval = DDI_SUCCESS;
1524 1524 break;
1525 1525
	/*
	 * Failure path: undo, in reverse order, exactly the steps the
	 * "progress" flag bits record as having completed.
	 */
1526 1526 attach_failed:
1527 1527 if (progress & QL_FCA_ATTACH_DONE) {
1528 1528 (void) fc_fca_detach(dip);
1529 1529 progress &= ~QL_FCA_ATTACH_DONE;
1530 1530 }
1531 1531
1532 1532 if (progress & QL_FCA_TRAN_ALLOCED) {
1533 1533 kmem_free(tran, sizeof (fc_fca_tran_t));
1534 1534 progress &= ~QL_FCA_TRAN_ALLOCED;
1535 1535 }
1536 1536
1537 1537 if (progress & QL_MINOR_NODE_CREATED) {
1538 1538 ddi_remove_minor_node(dip, "devctl");
1539 1539 progress &= ~QL_MINOR_NODE_CREATED;
1540 1540 }
1541 1541
1542 1542 if (progress & QL_KSTAT_CREATED) {
1543 1543 kstat_delete(ha->k_stats);
1544 1544 progress &= ~QL_KSTAT_CREATED;
1545 1545 }
1546 1546
1547 1547 if (progress & QL_N_PORT_INFO_CREATED) {
1548 1548 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1549 1549 progress &= ~QL_N_PORT_INFO_CREATED;
1550 1550 }
1551 1551
1552 1552 if (progress & QL_TASK_DAEMON_STARTED) {
1553 1553 TASK_DAEMON_LOCK(ha);
1554 1554
1555 1555 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1556 1556
1557 1557 cv_signal(&ha->cv_task_daemon);
1558 1558
1559 1559 /* Release task daemon lock. */
1560 1560 TASK_DAEMON_UNLOCK(ha);
1561 1561
1562 1562 /* Wait for for task daemon to stop running. */
1563 1563 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1564 1564 ql_delay(ha, 10000);
1565 1565 }
1566 1566 progress &= ~QL_TASK_DAEMON_STARTED;
1567 1567 }
1568 1568
1569 1569 if (progress & QL_DB_IOBASE_MAPPED) {
1570 1570 ql_8021_clr_drv_active(ha);
1571 1571 ddi_regs_map_free(&ha->db_dev_handle);
1572 1572 progress &= ~QL_DB_IOBASE_MAPPED;
1573 1573 }
1574 1574 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1575 1575 ddi_regs_map_free(&ha->iomap_dev_handle);
1576 1576 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1577 1577 }
1578 1578
1579 1579 if (progress & QL_CONFIG_SPACE_SETUP) {
1580 1580 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1581 1581 ddi_regs_map_free(&ha->sbus_config_handle);
1582 1582 } else {
1583 1583 pci_config_teardown(&ha->pci_handle);
1584 1584 }
1585 1585 progress &= ~QL_CONFIG_SPACE_SETUP;
1586 1586 }
1587 1587
1588 1588 if (progress & QL_INTR_ADDED) {
1589 1589 ql_disable_intr(ha);
1590 1590 ql_release_intr(ha);
1591 1591 progress &= ~QL_INTR_ADDED;
1592 1592 }
1593 1593
1594 1594 if (progress & QL_MUTEX_CV_INITED) {
1595 1595 ql_destroy_mutex(ha);
1596 1596 progress &= ~QL_MUTEX_CV_INITED;
1597 1597 }
1598 1598
1599 1599 if (progress & QL_HBA_BUFFER_SETUP) {
1600 1600 ql_free_phys(ha, &ha->hba_buf);
1601 1601 progress &= ~QL_HBA_BUFFER_SETUP;
1602 1602 }
1603 1603
1604 1604 if (progress & QL_REGS_MAPPED) {
1605 1605 ddi_regs_map_free(&ha->dev_handle);
1606 1606 if (ha->sbus_fpga_iobase != NULL) {
1607 1607 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1608 1608 }
1609 1609 progress &= ~QL_REGS_MAPPED;
1610 1610 }
1611 1611
1612 1612 if (progress & QL_SOFT_STATE_ALLOCED) {
1613 1613
1614 1614 ql_fcache_rel(ha->fcache);
1615 1615
1616 1616 kmem_free(ha->adapter_stats,
1617 1617 sizeof (*ha->adapter_stats));
1618 1618
1619 1619 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1620 1620 QL_UB_LIMIT);
1621 1621
1622 1622 kmem_free(ha->outstanding_cmds,
1623 1623 sizeof (*ha->outstanding_cmds) *
1624 1624 MAX_OUTSTANDING_COMMANDS);
1625 1625
1626 1626 if (ha->devpath != NULL) {
1627 1627 kmem_free(ha->devpath,
1628 1628 strlen(ha->devpath) + 1);
1629 1629 }
1630 1630
1631 1631 kmem_free(ha->dev, sizeof (*ha->dev) *
1632 1632 DEVICE_HEAD_LIST_SIZE);
1633 1633
1634 1634 if (ha->xioctl != NULL) {
1635 1635 ql_free_xioctl_resource(ha);
1636 1636 }
1637 1637
1638 1638 if (ha->fw_module != NULL) {
1639 1639 (void) ddi_modclose(ha->fw_module);
1640 1640 }
1641 1641 (void) ql_el_trace_desc_dtor(ha);
1642 1642 (void) ql_nvram_cache_desc_dtor(ha);
1643 1643
1644 1644 ddi_soft_state_free(ql_state, instance);
1645 1645 progress &= ~QL_SOFT_STATE_ALLOCED;
1646 1646 }
1647 1647
1648 1648 ddi_prop_remove_all(dip);
1649 1649 rval = DDI_FAILURE;
1650 1650 break;
1651 1651
1652 1652 case DDI_RESUME:
1653 1653 rval = DDI_FAILURE;
1654 1654
1655 1655 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1656 1656 if (ha == NULL) {
1657 1657 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1658 1658 QL_NAME, instance);
1659 1659 break;
1660 1660 }
1661 1661
1662 1662 ha->power_level = PM_LEVEL_D3;
1663 1663 if (ha->pm_capable) {
1664 1664 /*
1665 1665 * Get ql_power to do power on initialization
1666 1666 */
1667 1667 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1668 1668 PM_LEVEL_D0) != DDI_SUCCESS) {
1669 1669 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1670 1670 " power", QL_NAME, instance);
1671 1671 }
1672 1672 }
1673 1673
1674 1674 /*
1675 1675 * There is a bug in DR that prevents PM framework
1676 1676 * from calling ql_power.
1677 1677 */
1678 1678 if (ha->power_level == PM_LEVEL_D3) {
1679 1679 ha->power_level = PM_LEVEL_D0;
1680 1680
1681 1681 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1682 1682 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1683 1683 " adapter", QL_NAME, instance);
1684 1684 }
1685 1685
1686 1686 /* Wake up task_daemon. */
1687 1687 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1688 1688 0);
1689 1689 }
1690 1690
1691 1691 /* Acquire global state lock. */
1692 1692 GLOBAL_STATE_LOCK();
1693 1693
1694 1694 /* Restart driver timer. */
1695 1695 if (ql_timer_timeout_id == NULL) {
1696 1696 ql_timer_timeout_id = timeout(ql_timer, (void *)0,
1697 1697 ql_timer_ticks);
1698 1698 }
1699 1699
1700 1700 /* Release global state lock. */
1701 1701 GLOBAL_STATE_UNLOCK();
1702 1702
1703 1703 /* Wake up command start routine. */
1704 1704 ADAPTER_STATE_LOCK(ha);
1705 1705 ha->flags &= ~ADAPTER_SUSPENDED;
1706 1706 ADAPTER_STATE_UNLOCK(ha);
1707 1707
1708 1708 /*
1709 1709 * Transport doesn't make FC discovery in polled
1710 1710 * mode; So we need the daemon thread's services
1711 1711 * right here.
1712 1712 */
1713 1713 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1714 1714
1715 1715 rval = DDI_SUCCESS;
1716 1716
1717 1717 /* Restart IP if it was running. */
1718 1718 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1719 1719 (void) ql_initialize_ip(ha);
1720 1720 ql_isp_rcvbuf(ha);
1721 1721 }
1722 1722 break;
1723 1723
1724 1724 default:
1725 1725 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1726 1726 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1727 1727 rval = DDI_FAILURE;
1728 1728 break;
1729 1729 }
1730 1730
1731 1731 kmem_free(buf, MAXPATHLEN);
1732 1732
1733 1733 if (rval != DDI_SUCCESS) {
1734 1734 /*EMPTY*/
1735 1735 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1736 1736 ddi_get_instance(dip), rval);
1737 1737 } else {
1738 1738 /*EMPTY*/
1739 1739 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1740 1740 }
1741 1741
1742 1742 return (rval);
1743 1743 }
1744 1744
1745 1745 /*
1746 1746 * ql_detach
1747 1747 * Used to remove all the states associated with a given
1748 1748 * instances of a device node prior to the removal of that
1749 1749 * instance from the system.
1750 1750 *
1751 1751 * Input:
1752 1752 * dip = pointer to device information structure.
1753 1753 * cmd = type of detach.
1754 1754 *
1755 1755 * Returns:
1756 1756 * DDI_SUCCESS or DDI_FAILURE.
1757 1757 *
1758 1758 * Context:
1759 1759 * Kernel context.
1760 1760 */
1761 1761 static int
1762 1762 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1763 1763 {
1764 1764 ql_adapter_state_t *ha, *vha;
1765 1765 ql_tgt_t *tq;
1766 1766 int delay_cnt;
1767 1767 uint16_t index;
1768 1768 ql_link_t *link;
1769 1769 char *buf;
1770 1770 timeout_id_t timer_id = NULL;
1771 1771 int suspend, rval = DDI_SUCCESS;
1772 1772
1773 1773 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1774 1774 if (ha == NULL) {
1775 1775 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
1776 1776 ddi_get_instance(dip));
1777 1777 return (DDI_FAILURE);
1778 1778 }
1779 1779
1780 1780 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);
1781 1781
1782 1782 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
1783 1783
1784 1784 switch (cmd) {
1785 1785 case DDI_DETACH:
1786 1786 ADAPTER_STATE_LOCK(ha);
1787 1787 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
1788 1788 ADAPTER_STATE_UNLOCK(ha);
1789 1789
1790 1790 TASK_DAEMON_LOCK(ha);
1791 1791
1792 1792 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
1793 1793 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1794 1794 cv_signal(&ha->cv_task_daemon);
1795 1795
1796 1796 TASK_DAEMON_UNLOCK(ha);
1797 1797
1798 1798 (void) ql_wait_for_td_stop(ha);
1799 1799
1800 1800 TASK_DAEMON_LOCK(ha);
1801 1801 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1802 1802 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
1803 1803 EL(ha, "failed, could not stop task daemon\n");
1804 1804 }
1805 1805 }
1806 1806 TASK_DAEMON_UNLOCK(ha);
1807 1807
1808 1808 GLOBAL_STATE_LOCK();
1809 1809
1810 1810 /* Disable driver timer if no adapters. */
1811 1811 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
1812 1812 ql_hba.last == &ha->hba) {
1813 1813 timer_id = ql_timer_timeout_id;
1814 1814 ql_timer_timeout_id = NULL;
1815 1815 }
1816 1816 ql_remove_link(&ql_hba, &ha->hba);
1817 1817
1818 1818 GLOBAL_STATE_UNLOCK();
1819 1819
1820 1820 if (timer_id) {
1821 1821 (void) untimeout(timer_id);
1822 1822 }
1823 1823
1824 1824 if (ha->pm_capable) {
1825 1825 if (pm_lower_power(dip, QL_POWER_COMPONENT,
1826 1826 PM_LEVEL_D3) != DDI_SUCCESS) {
1827 1827 cmn_err(CE_WARN, "%s(%d): failed to lower the"
1828 1828 " power", QL_NAME, ha->instance);
1829 1829 }
1830 1830 }
1831 1831
1832 1832 /*
1833 1833 * If pm_lower_power shutdown the adapter, there
1834 1834 * isn't much else to do
1835 1835 */
1836 1836 if (ha->power_level != PM_LEVEL_D3) {
1837 1837 ql_halt(ha, PM_LEVEL_D3);
1838 1838 }
1839 1839
1840 1840 /* Remove virtual ports. */
1841 1841 while ((vha = ha->vp_next) != NULL) {
1842 1842 ql_vport_destroy(vha);
1843 1843 }
1844 1844
1845 1845 /* Free target queues. */
1846 1846 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1847 1847 link = ha->dev[index].first;
1848 1848 while (link != NULL) {
1849 1849 tq = link->base_address;
1850 1850 link = link->next;
1851 1851 ql_dev_free(ha, tq);
1852 1852 }
1853 1853 }
1854 1854
1855 1855 /*
1856 1856 * Free unsolicited buffers.
1857 1857 * If we are here then there are no ULPs still
1858 1858 * alive that wish to talk to ql so free up
1859 1859 * any SRB_IP_UB_UNUSED buffers that are
1860 1860 * lingering around
1861 1861 */
1862 1862 QL_UB_LOCK(ha);
1863 1863 for (index = 0; index < QL_UB_LIMIT; index++) {
1864 1864 fc_unsol_buf_t *ubp = ha->ub_array[index];
1865 1865
1866 1866 if (ubp != NULL) {
1867 1867 ql_srb_t *sp = ubp->ub_fca_private;
1868 1868
1869 1869 sp->flags |= SRB_UB_FREE_REQUESTED;
1870 1870
1871 1871 while (!(sp->flags & SRB_UB_IN_FCA) ||
1872 1872 (sp->flags & (SRB_UB_CALLBACK |
1873 1873 SRB_UB_ACQUIRED))) {
1874 1874 QL_UB_UNLOCK(ha);
1875 1875 delay(drv_usectohz(100000));
1876 1876 QL_UB_LOCK(ha);
1877 1877 }
1878 1878 ha->ub_array[index] = NULL;
1879 1879
1880 1880 QL_UB_UNLOCK(ha);
1881 1881 ql_free_unsolicited_buffer(ha, ubp);
1882 1882 QL_UB_LOCK(ha);
1883 1883 }
1884 1884 }
1885 1885 QL_UB_UNLOCK(ha);
1886 1886
1887 1887 /* Free any saved RISC code. */
1888 1888 if (ha->risc_code != NULL) {
1889 1889 kmem_free(ha->risc_code, ha->risc_code_size);
1890 1890 ha->risc_code = NULL;
1891 1891 ha->risc_code_size = 0;
1892 1892 }
1893 1893
1894 1894 if (ha->fw_module != NULL) {
1895 1895 (void) ddi_modclose(ha->fw_module);
1896 1896 ha->fw_module = NULL;
1897 1897 }
1898 1898
1899 1899 /* Free resources. */
1900 1900 ddi_prop_remove_all(dip);
1901 1901 (void) fc_fca_detach(dip);
1902 1902 kmem_free(ha->tran, sizeof (fc_fca_tran_t));
1903 1903 ddi_remove_minor_node(dip, "devctl");
1904 1904 if (ha->k_stats != NULL) {
1905 1905 kstat_delete(ha->k_stats);
1906 1906 }
1907 1907
1908 1908 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1909 1909 ddi_regs_map_free(&ha->sbus_config_handle);
1910 1910 } else {
1911 1911 if (CFG_IST(ha, CFG_CTRL_8021)) {
1912 1912 ql_8021_clr_drv_active(ha);
1913 1913 ddi_regs_map_free(&ha->db_dev_handle);
1914 1914 }
1915 1915 if (ha->iomap_dev_handle != ha->dev_handle) {
1916 1916 ddi_regs_map_free(&ha->iomap_dev_handle);
1917 1917 }
1918 1918 pci_config_teardown(&ha->pci_handle);
1919 1919 }
1920 1920
1921 1921 ql_disable_intr(ha);
1922 1922 ql_release_intr(ha);
1923 1923
1924 1924 ql_free_xioctl_resource(ha);
1925 1925
1926 1926 ql_destroy_mutex(ha);
1927 1927
1928 1928 ql_free_phys(ha, &ha->hba_buf);
1929 1929 ql_free_phys(ha, &ha->fwexttracebuf);
1930 1930 ql_free_phys(ha, &ha->fwfcetracebuf);
1931 1931
1932 1932 ddi_regs_map_free(&ha->dev_handle);
1933 1933 if (ha->sbus_fpga_iobase != NULL) {
1934 1934 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1935 1935 }
1936 1936
1937 1937 ql_fcache_rel(ha->fcache);
1938 1938 if (ha->vcache != NULL) {
1939 1939 kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
1940 1940 }
1941 1941
1942 1942 if (ha->pi_attrs != NULL) {
1943 1943 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
1944 1944 }
1945 1945
1946 1946 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));
1947 1947
1948 1948 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
1949 1949
1950 1950 kmem_free(ha->outstanding_cmds,
1951 1951 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);
1952 1952
1953 1953 if (ha->n_port != NULL) {
1954 1954 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1955 1955 }
1956 1956
1957 1957 if (ha->devpath != NULL) {
1958 1958 kmem_free(ha->devpath, strlen(ha->devpath) + 1);
1959 1959 }
1960 1960
1961 1961 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);
1962 1962
1963 1963 EL(ha, "detached\n");
1964 1964
1965 1965 ddi_soft_state_free(ql_state, (int)ha->instance);
↓ open down ↓ |
454 lines elided |
↑ open up ↑ |
1966 1966
1967 1967 break;
1968 1968
1969 1969 case DDI_SUSPEND:
1970 1970 ADAPTER_STATE_LOCK(ha);
1971 1971
1972 1972 delay_cnt = 0;
1973 1973 ha->flags |= ADAPTER_SUSPENDED;
1974 1974 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
1975 1975 ADAPTER_STATE_UNLOCK(ha);
1976 - delay(drv_usectohz(1000000));
1976 + delay(drv_sectohz(1));
1977 1977 ADAPTER_STATE_LOCK(ha);
1978 1978 }
1979 1979 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
1980 1980 ha->flags &= ~ADAPTER_SUSPENDED;
1981 1981 ADAPTER_STATE_UNLOCK(ha);
1982 1982 rval = DDI_FAILURE;
1983 1983 cmn_err(CE_WARN, "!%s(%d): Fail suspend"
1984 1984 " busy %xh flags %xh", QL_NAME, ha->instance,
1985 1985 ha->busy, ha->flags);
1986 1986 break;
1987 1987 }
1988 1988
1989 1989 ADAPTER_STATE_UNLOCK(ha);
1990 1990
1991 1991 if (ha->flags & IP_INITIALIZED) {
1992 1992 (void) ql_shutdown_ip(ha);
1993 1993 }
1994 1994
1995 1995 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
1996 1996 ADAPTER_STATE_LOCK(ha);
1997 1997 ha->flags &= ~ADAPTER_SUSPENDED;
1998 1998 ADAPTER_STATE_UNLOCK(ha);
1999 1999 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
2000 2000 QL_NAME, ha->instance, suspend);
2001 2001
2002 2002 /* Restart IP if it was running. */
2003 2003 if (ha->flags & IP_ENABLED &&
2004 2004 !(ha->flags & IP_INITIALIZED)) {
2005 2005 (void) ql_initialize_ip(ha);
2006 2006 ql_isp_rcvbuf(ha);
2007 2007 }
2008 2008 rval = DDI_FAILURE;
2009 2009 break;
2010 2010 }
2011 2011
2012 2012 /* Acquire global state lock. */
2013 2013 GLOBAL_STATE_LOCK();
2014 2014
2015 2015 /* Disable driver timer if last adapter. */
2016 2016 if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
2017 2017 ql_hba.last == &ha->hba) {
2018 2018 timer_id = ql_timer_timeout_id;
2019 2019 ql_timer_timeout_id = NULL;
2020 2020 }
2021 2021 GLOBAL_STATE_UNLOCK();
2022 2022
2023 2023 if (timer_id) {
2024 2024 (void) untimeout(timer_id);
2025 2025 }
2026 2026
2027 2027 EL(ha, "suspended\n");
2028 2028
2029 2029 break;
2030 2030
2031 2031 default:
2032 2032 rval = DDI_FAILURE;
2033 2033 break;
2034 2034 }
2035 2035
2036 2036 kmem_free(buf, MAXPATHLEN);
2037 2037
2038 2038 if (rval != DDI_SUCCESS) {
2039 2039 if (ha != NULL) {
2040 2040 EL(ha, "failed, rval = %xh\n", rval);
2041 2041 } else {
2042 2042 /*EMPTY*/
2043 2043 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
2044 2044 ddi_get_instance(dip), rval);
2045 2045 }
2046 2046 } else {
2047 2047 /*EMPTY*/
2048 2048 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
2049 2049 }
2050 2050
2051 2051 return (rval);
2052 2052 }
2053 2053
2054 2054
2055 2055 /*
2056 2056 * ql_power
2057 2057 * Power a device attached to the system.
2058 2058 *
2059 2059 * Input:
2060 2060 * dip = pointer to device information structure.
2061 2061 * component = device.
2062 2062 * level = power level.
2063 2063 *
2064 2064 * Returns:
2065 2065 * DDI_SUCCESS or DDI_FAILURE.
2066 2066 *
2067 2067 * Context:
2068 2068 * Kernel context.
2069 2069 */
2070 2070 /* ARGSUSED */
2071 2071 static int
2072 2072 ql_power(dev_info_t *dip, int component, int level)
2073 2073 {
2074 2074 int rval = DDI_FAILURE;
2075 2075 off_t csr;
2076 2076 uint8_t saved_pm_val;
2077 2077 ql_adapter_state_t *ha;
2078 2078 char *buf;
2079 2079 char *path;
2080 2080
2081 2081 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2082 2082 if (ha == NULL || ha->pm_capable == 0) {
2083 2083 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2084 2084 ddi_get_instance(dip));
2085 2085 return (rval);
2086 2086 }
2087 2087
2088 2088 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2089 2089
2090 2090 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2091 2091 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2092 2092
2093 2093 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2094 2094 level != PM_LEVEL_D3)) {
2095 2095 EL(ha, "invalid, component=%xh or level=%xh\n",
2096 2096 component, level);
2097 2097 return (rval);
2098 2098 }
2099 2099
2100 2100 GLOBAL_HW_LOCK();
2101 2101 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2102 2102 GLOBAL_HW_UNLOCK();
2103 2103
2104 2104 (void) snprintf(buf, sizeof (buf),
2105 2105 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2106 2106 ddi_pathname(dip, path));
2107 2107
2108 2108 switch (level) {
2109 2109 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2110 2110
2111 2111 QL_PM_LOCK(ha);
2112 2112 if (ha->power_level == PM_LEVEL_D0) {
2113 2113 QL_PM_UNLOCK(ha);
2114 2114 rval = DDI_SUCCESS;
2115 2115 break;
2116 2116 }
2117 2117
2118 2118 /*
2119 2119 * Enable interrupts now
2120 2120 */
2121 2121 saved_pm_val = ha->power_level;
2122 2122 ha->power_level = PM_LEVEL_D0;
2123 2123 QL_PM_UNLOCK(ha);
2124 2124
2125 2125 GLOBAL_HW_LOCK();
2126 2126
2127 2127 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2128 2128
2129 2129 /*
2130 2130 * Delay after reset, for chip to recover.
2131 2131 * Otherwise causes system PANIC
2132 2132 */
2133 2133 drv_usecwait(200000);
2134 2134
2135 2135 GLOBAL_HW_UNLOCK();
2136 2136
2137 2137 if (ha->config_saved) {
2138 2138 ha->config_saved = 0;
2139 2139 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2140 2140 QL_PM_LOCK(ha);
2141 2141 ha->power_level = saved_pm_val;
2142 2142 QL_PM_UNLOCK(ha);
2143 2143 cmn_err(CE_WARN, "%s failed to restore "
2144 2144 "config regs", buf);
2145 2145 break;
2146 2146 }
2147 2147 }
2148 2148
2149 2149 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2150 2150 cmn_err(CE_WARN, "%s adapter initialization failed",
2151 2151 buf);
2152 2152 }
2153 2153
2154 2154 /* Wake up task_daemon. */
2155 2155 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2156 2156 TASK_DAEMON_SLEEPING_FLG, 0);
2157 2157
2158 2158 /* Restart IP if it was running. */
2159 2159 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2160 2160 (void) ql_initialize_ip(ha);
2161 2161 ql_isp_rcvbuf(ha);
2162 2162 }
2163 2163
2164 2164 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2165 2165 ha->instance, QL_NAME);
2166 2166
2167 2167 rval = DDI_SUCCESS;
2168 2168 break;
2169 2169
2170 2170 case PM_LEVEL_D3: /* power down to D3 state - off */
2171 2171
2172 2172 QL_PM_LOCK(ha);
2173 2173
2174 2174 if (ha->busy || ((ha->task_daemon_flags &
2175 2175 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2176 2176 QL_PM_UNLOCK(ha);
2177 2177 break;
2178 2178 }
2179 2179
2180 2180 if (ha->power_level == PM_LEVEL_D3) {
2181 2181 rval = DDI_SUCCESS;
2182 2182 QL_PM_UNLOCK(ha);
2183 2183 break;
2184 2184 }
2185 2185 QL_PM_UNLOCK(ha);
2186 2186
2187 2187 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2188 2188 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2189 2189 " config regs", QL_NAME, ha->instance, buf);
2190 2190 break;
2191 2191 }
2192 2192 ha->config_saved = 1;
2193 2193
2194 2194 /*
2195 2195 * Don't enable interrupts. Running mailbox commands with
2196 2196 * interrupts enabled could cause hangs since pm_run_scan()
2197 2197 * runs out of a callout thread and on single cpu systems
2198 2198 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2199 2199 * would not get to run.
2200 2200 */
2201 2201 TASK_DAEMON_LOCK(ha);
2202 2202 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2203 2203 TASK_DAEMON_UNLOCK(ha);
2204 2204
2205 2205 ql_halt(ha, PM_LEVEL_D3);
2206 2206
2207 2207 /*
2208 2208 * Setup ql_intr to ignore interrupts from here on.
2209 2209 */
2210 2210 QL_PM_LOCK(ha);
2211 2211 ha->power_level = PM_LEVEL_D3;
2212 2212 QL_PM_UNLOCK(ha);
2213 2213
2214 2214 /*
2215 2215 * Wait for ISR to complete.
2216 2216 */
2217 2217 INTR_LOCK(ha);
2218 2218 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2219 2219 INTR_UNLOCK(ha);
2220 2220
2221 2221 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2222 2222 ha->instance, QL_NAME);
2223 2223
2224 2224 rval = DDI_SUCCESS;
2225 2225 break;
2226 2226 }
2227 2227
2228 2228 kmem_free(buf, MAXPATHLEN);
2229 2229 kmem_free(path, MAXPATHLEN);
2230 2230
2231 2231 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2232 2232
2233 2233 return (rval);
2234 2234 }
2235 2235
2236 2236 /*
2237 2237 * ql_quiesce
2238 2238 * quiesce a device attached to the system.
2239 2239 *
2240 2240 * Input:
2241 2241 * dip = pointer to device information structure.
2242 2242 *
2243 2243 * Returns:
2244 2244 * DDI_SUCCESS
2245 2245 *
2246 2246 * Context:
2247 2247 * Kernel context.
2248 2248 */
2249 2249 static int
2250 2250 ql_quiesce(dev_info_t *dip)
2251 2251 {
2252 2252 ql_adapter_state_t *ha;
2253 2253 uint32_t timer;
2254 2254 uint32_t stat;
2255 2255
2256 2256 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2257 2257 if (ha == NULL) {
2258 2258 /* Oh well.... */
2259 2259 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2260 2260 ddi_get_instance(dip));
2261 2261 return (DDI_SUCCESS);
2262 2262 }
2263 2263
2264 2264 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2265 2265
2266 2266 if (CFG_IST(ha, CFG_CTRL_8021)) {
2267 2267 (void) ql_stop_firmware(ha);
2268 2268 } else if (CFG_IST(ha, CFG_CTRL_242581)) {
2269 2269 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2270 2270 WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
2271 2271 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
2272 2272 for (timer = 0; timer < 30000; timer++) {
2273 2273 stat = RD32_IO_REG(ha, risc2host);
2274 2274 if (stat & BIT_15) {
2275 2275 if ((stat & 0xff) < 0x12) {
2276 2276 WRT32_IO_REG(ha, hccr,
2277 2277 HC24_CLR_RISC_INT);
2278 2278 break;
2279 2279 }
2280 2280 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
2281 2281 }
2282 2282 drv_usecwait(100);
2283 2283 }
2284 2284 /* Reset the chip. */
2285 2285 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
2286 2286 MWB_4096_BYTES);
2287 2287 drv_usecwait(100);
2288 2288
2289 2289 } else {
2290 2290 /* Disable ISP interrupts. */
2291 2291 WRT16_IO_REG(ha, ictrl, 0);
2292 2292 /* Select RISC module registers. */
2293 2293 WRT16_IO_REG(ha, ctrl_status, 0);
2294 2294 /* Reset ISP semaphore. */
2295 2295 WRT16_IO_REG(ha, semaphore, 0);
2296 2296 /* Reset RISC module. */
2297 2297 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
2298 2298 /* Release RISC module. */
2299 2299 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
2300 2300 }
2301 2301
2302 2302 ql_disable_intr(ha);
2303 2303
2304 2304 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2305 2305
2306 2306 return (DDI_SUCCESS);
2307 2307 }
2308 2308
2309 2309 /* ************************************************************************ */
2310 2310 /* Fibre Channel Adapter (FCA) Transport Functions. */
2311 2311 /* ************************************************************************ */
2312 2312
2313 2313 /*
2314 2314 * ql_bind_port
2315 2315 * Handling port binding. The FC Transport attempts to bind an FCA port
2316 2316 * when it is ready to start transactions on the port. The FC Transport
2317 2317 * will call the fca_bind_port() function specified in the fca_transport
2318 2318 * structure it receives. The FCA must fill in the port_info structure
2319 2319 * passed in the call and also stash the information for future calls.
2320 2320 *
2321 2321 * Input:
2322 2322 * dip = pointer to FCA information structure.
2323 2323 * port_info = pointer to port information structure.
2324 2324 * bind_info = pointer to bind information structure.
2325 2325 *
2326 2326 * Returns:
2327 2327 * NULL = failure
2328 2328 *
2329 2329 * Context:
2330 2330 * Kernel context.
2331 2331 */
2332 2332 static opaque_t
2333 2333 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2334 2334 fc_fca_bind_info_t *bind_info)
2335 2335 {
2336 2336 ql_adapter_state_t *ha, *vha;
2337 2337 opaque_t fca_handle = NULL;
2338 2338 port_id_t d_id;
2339 2339 int port_npiv = bind_info->port_npiv;
2340 2340 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2341 2341 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2342 2342
2343 2343 /* get state info based on the dip */
2344 2344 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2345 2345 if (ha == NULL) {
2346 2346 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2347 2347 ddi_get_instance(dip));
2348 2348 return (NULL);
2349 2349 }
2350 2350 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2351 2351
2352 2352 /* Verify port number is supported. */
2353 2353 if (port_npiv != 0) {
2354 2354 if (!(ha->flags & VP_ENABLED)) {
2355 2355 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2356 2356 ha->instance);
2357 2357 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2358 2358 return (NULL);
2359 2359 }
2360 2360 if (!(ha->flags & POINT_TO_POINT)) {
2361 2361 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2362 2362 ha->instance);
2363 2363 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2364 2364 return (NULL);
2365 2365 }
2366 2366 if (!(ha->flags & FDISC_ENABLED)) {
2367 2367 QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2368 2368 "FDISC\n", ha->instance);
2369 2369 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2370 2370 return (NULL);
2371 2371 }
2372 2372 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2373 2373 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2374 2374 QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2375 2375 "FC_OUTOFBOUNDS\n", ha->instance);
2376 2376 port_info->pi_error = FC_OUTOFBOUNDS;
2377 2377 return (NULL);
2378 2378 }
2379 2379 } else if (bind_info->port_num != 0) {
2380 2380 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2381 2381 "supported\n", ha->instance, bind_info->port_num);
2382 2382 port_info->pi_error = FC_OUTOFBOUNDS;
2383 2383 return (NULL);
2384 2384 }
2385 2385
2386 2386 /* Locate port context. */
2387 2387 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2388 2388 if (vha->vp_index == bind_info->port_num) {
2389 2389 break;
2390 2390 }
2391 2391 }
2392 2392
2393 2393 /* If virtual port does not exist. */
2394 2394 if (vha == NULL) {
2395 2395 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2396 2396 }
2397 2397
2398 2398 /* make sure this port isn't already bound */
2399 2399 if (vha->flags & FCA_BOUND) {
2400 2400 port_info->pi_error = FC_ALREADY;
2401 2401 } else {
2402 2402 if (vha->vp_index != 0) {
2403 2403 bcopy(port_nwwn,
2404 2404 vha->loginparams.node_ww_name.raw_wwn, 8);
2405 2405 bcopy(port_pwwn,
2406 2406 vha->loginparams.nport_ww_name.raw_wwn, 8);
2407 2407 }
2408 2408 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2409 2409 if (ql_vport_enable(vha) != QL_SUCCESS) {
2410 2410 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2411 2411 "virtual port=%d\n", ha->instance,
2412 2412 vha->vp_index);
2413 2413 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2414 2414 return (NULL);
2415 2415 }
2416 2416 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2417 2417 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2418 2418 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2419 2419 QL_NAME, ha->instance, vha->vp_index,
2420 2420 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2421 2421 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2422 2422 port_pwwn[6], port_pwwn[7],
2423 2423 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2424 2424 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2425 2425 port_nwwn[6], port_nwwn[7]);
2426 2426 }
2427 2427
2428 2428 /* stash the bind_info supplied by the FC Transport */
2429 2429 vha->bind_info.port_handle = bind_info->port_handle;
2430 2430 vha->bind_info.port_statec_cb =
2431 2431 bind_info->port_statec_cb;
2432 2432 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2433 2433
2434 2434 /* Set port's source ID. */
2435 2435 port_info->pi_s_id.port_id = vha->d_id.b24;
2436 2436
2437 2437 /* copy out the default login parameters */
2438 2438 bcopy((void *)&vha->loginparams,
2439 2439 (void *)&port_info->pi_login_params,
2440 2440 sizeof (la_els_logi_t));
2441 2441
2442 2442 /* Set port's hard address if enabled. */
2443 2443 port_info->pi_hard_addr.hard_addr = 0;
2444 2444 if (bind_info->port_num == 0) {
2445 2445 d_id.b24 = ha->d_id.b24;
2446 2446 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2447 2447 if (ha->init_ctrl_blk.cb24.
2448 2448 firmware_options_1[0] & BIT_0) {
2449 2449 d_id.b.al_pa = ql_index_to_alpa[ha->
2450 2450 init_ctrl_blk.cb24.
2451 2451 hard_address[0]];
2452 2452 port_info->pi_hard_addr.hard_addr =
2453 2453 d_id.b24;
2454 2454 }
2455 2455 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2456 2456 BIT_0) {
2457 2457 d_id.b.al_pa = ql_index_to_alpa[ha->
2458 2458 init_ctrl_blk.cb.hard_address[0]];
2459 2459 port_info->pi_hard_addr.hard_addr = d_id.b24;
2460 2460 }
2461 2461
2462 2462 /* Set the node id data */
2463 2463 if (ql_get_rnid_params(ha,
2464 2464 sizeof (port_info->pi_rnid_params.params),
2465 2465 (caddr_t)&port_info->pi_rnid_params.params) ==
2466 2466 QL_SUCCESS) {
2467 2467 port_info->pi_rnid_params.status = FC_SUCCESS;
2468 2468 } else {
2469 2469 port_info->pi_rnid_params.status = FC_FAILURE;
2470 2470 }
2471 2471
2472 2472 /* Populate T11 FC-HBA details */
2473 2473 ql_populate_hba_fru_details(ha, port_info);
2474 2474 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2475 2475 KM_SLEEP);
2476 2476 if (ha->pi_attrs != NULL) {
2477 2477 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2478 2478 sizeof (fca_port_attrs_t));
2479 2479 }
2480 2480 } else {
2481 2481 port_info->pi_rnid_params.status = FC_FAILURE;
2482 2482 if (ha->pi_attrs != NULL) {
2483 2483 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2484 2484 sizeof (fca_port_attrs_t));
2485 2485 }
2486 2486 }
2487 2487
2488 2488 /* Generate handle for this FCA. */
2489 2489 fca_handle = (opaque_t)vha;
2490 2490
2491 2491 ADAPTER_STATE_LOCK(ha);
2492 2492 vha->flags |= FCA_BOUND;
2493 2493 ADAPTER_STATE_UNLOCK(ha);
2494 2494 /* Set port's current state. */
2495 2495 port_info->pi_port_state = vha->state;
2496 2496 }
2497 2497
2498 2498 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2499 2499 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2500 2500 port_info->pi_port_state, port_info->pi_s_id.port_id);
2501 2501
2502 2502 return (fca_handle);
2503 2503 }
2504 2504
2505 2505 /*
2506 2506 * ql_unbind_port
2507 2507 * To unbind a Fibre Channel Adapter from an FC Port driver.
2508 2508 *
2509 2509 * Input:
2510 2510 * fca_handle = handle setup by ql_bind_port().
2511 2511 *
2512 2512 * Context:
2513 2513 * Kernel context.
2514 2514 */
2515 2515 static void
2516 2516 ql_unbind_port(opaque_t fca_handle)
2517 2517 {
2518 2518 ql_adapter_state_t *ha;
2519 2519 ql_tgt_t *tq;
2520 2520 uint32_t flgs;
2521 2521
2522 2522 ha = ql_fca_handle_to_state(fca_handle);
2523 2523 if (ha == NULL) {
2524 2524 /*EMPTY*/
2525 2525 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2526 2526 (void *)fca_handle);
2527 2527 } else {
2528 2528 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2529 2529 ha->vp_index);
2530 2530
2531 2531 if (!(ha->flags & FCA_BOUND)) {
2532 2532 /*EMPTY*/
2533 2533 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2534 2534 ha->instance, ha->vp_index);
2535 2535 } else {
2536 2536 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2537 2537 if ((tq = ql_loop_id_to_queue(ha,
2538 2538 FL_PORT_24XX_HDL)) != NULL) {
2539 2539 (void) ql_logout_fabric_port(ha, tq);
2540 2540 }
2541 2541 (void) ql_vport_control(ha, (uint8_t)
2542 2542 (CFG_IST(ha, CFG_CTRL_2425) ?
2543 2543 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2544 2544 flgs = FCA_BOUND | VP_ENABLED;
2545 2545 } else {
2546 2546 flgs = FCA_BOUND;
2547 2547 }
2548 2548 ADAPTER_STATE_LOCK(ha);
2549 2549 ha->flags &= ~flgs;
2550 2550 ADAPTER_STATE_UNLOCK(ha);
2551 2551 }
2552 2552
2553 2553 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2554 2554 ha->vp_index);
2555 2555 }
2556 2556 }
2557 2557
2558 2558 /*
2559 2559 * ql_init_pkt
2560 2560 * Initialize FCA portion of packet.
2561 2561 *
2562 2562 * Input:
2563 2563 * fca_handle = handle setup by ql_bind_port().
2564 2564 * pkt = pointer to fc_packet.
2565 2565 *
2566 2566 * Returns:
2567 2567 * FC_SUCCESS - the packet has successfully been initialized.
2568 2568 * FC_UNBOUND - the fca_handle specified is not bound.
2569 2569 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2570 2570 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2571 2571 *
2572 2572 * Context:
2573 2573 * Kernel context.
2574 2574 */
2575 2575 /* ARGSUSED */
2576 2576 static int
2577 2577 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2578 2578 {
2579 2579 ql_adapter_state_t *ha;
2580 2580 ql_srb_t *sp;
2581 2581 int rval = FC_SUCCESS;
2582 2582
2583 2583 ha = ql_fca_handle_to_state(fca_handle);
2584 2584 if (ha == NULL) {
2585 2585 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2586 2586 (void *)fca_handle);
2587 2587 return (FC_UNBOUND);
2588 2588 }
2589 2589 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2590 2590
2591 2591 sp = (ql_srb_t *)pkt->pkt_fca_private;
2592 2592 sp->flags = 0;
2593 2593
2594 2594 /* init cmd links */
2595 2595 sp->cmd.base_address = sp;
2596 2596 sp->cmd.prev = NULL;
2597 2597 sp->cmd.next = NULL;
2598 2598 sp->cmd.head = NULL;
2599 2599
2600 2600 /* init watchdog links */
2601 2601 sp->wdg.base_address = sp;
2602 2602 sp->wdg.prev = NULL;
2603 2603 sp->wdg.next = NULL;
2604 2604 sp->wdg.head = NULL;
2605 2605 sp->pkt = pkt;
2606 2606 sp->ha = ha;
2607 2607 sp->magic_number = QL_FCA_BRAND;
2608 2608 sp->sg_dma.dma_handle = NULL;
2609 2609 #ifndef __sparc
2610 2610 if (CFG_IST(ha, CFG_CTRL_8021)) {
2611 2611 /* Setup DMA for scatter gather list. */
2612 2612 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2613 2613 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2614 2614 sp->sg_dma.cookie_count = 1;
2615 2615 sp->sg_dma.alignment = 64;
2616 2616 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2617 2617 rval = FC_NOMEM;
2618 2618 }
2619 2619 }
2620 2620 #endif /* __sparc */
2621 2621
2622 2622 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2623 2623
2624 2624 return (rval);
2625 2625 }
2626 2626
2627 2627 /*
2628 2628 * ql_un_init_pkt
2629 2629 * Release all local resources bound to packet.
2630 2630 *
2631 2631 * Input:
2632 2632 * fca_handle = handle setup by ql_bind_port().
2633 2633 * pkt = pointer to fc_packet.
2634 2634 *
2635 2635 * Returns:
2636 2636 * FC_SUCCESS - the packet has successfully been invalidated.
2637 2637 * FC_UNBOUND - the fca_handle specified is not bound.
2638 2638 * FC_BADPACKET - the packet has not been initialized or has
2639 2639 * already been freed by this FCA.
2640 2640 *
2641 2641 * Context:
2642 2642 * Kernel context.
2643 2643 */
2644 2644 static int
2645 2645 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2646 2646 {
2647 2647 ql_adapter_state_t *ha;
2648 2648 int rval;
2649 2649 ql_srb_t *sp;
2650 2650
2651 2651 ha = ql_fca_handle_to_state(fca_handle);
2652 2652 if (ha == NULL) {
2653 2653 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2654 2654 (void *)fca_handle);
2655 2655 return (FC_UNBOUND);
2656 2656 }
2657 2657 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2658 2658
2659 2659 sp = (ql_srb_t *)pkt->pkt_fca_private;
2660 2660
2661 2661 if (sp->magic_number != QL_FCA_BRAND) {
2662 2662 EL(ha, "failed, FC_BADPACKET\n");
2663 2663 rval = FC_BADPACKET;
2664 2664 } else {
2665 2665 sp->magic_number = NULL;
2666 2666 ql_free_phys(ha, &sp->sg_dma);
2667 2667 rval = FC_SUCCESS;
2668 2668 }
2669 2669
2670 2670 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2671 2671
2672 2672 return (rval);
2673 2673 }
2674 2674
2675 2675 /*
2676 2676 * ql_els_send
2677 2677 * Issue a extended link service request.
2678 2678 *
2679 2679 * Input:
2680 2680 * fca_handle = handle setup by ql_bind_port().
2681 2681 * pkt = pointer to fc_packet.
2682 2682 *
2683 2683 * Returns:
2684 2684 * FC_SUCCESS - the command was successful.
2685 2685 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2686 2686 * FC_ELS_PREJECT - the command was rejected by an N-port.
2687 2687 * FC_TRANSPORT_ERROR - a transport error occurred.
2688 2688 * FC_UNBOUND - the fca_handle specified is not bound.
2689 2689 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2690 2690 *
2691 2691 * Context:
2692 2692 * Kernel context.
2693 2693 */
2694 2694 static int
2695 2695 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
2696 2696 {
2697 2697 ql_adapter_state_t *ha;
2698 2698 int rval;
2699 2699 clock_t timer = drv_usectohz(30000000);
2700 2700 ls_code_t els;
2701 2701 la_els_rjt_t rjt;
2702 2702 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
2703 2703
2704 2704 /* Verify proper command. */
2705 2705 ha = ql_cmd_setup(fca_handle, pkt, &rval);
2706 2706 if (ha == NULL) {
2707 2707 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
2708 2708 rval, fca_handle);
2709 2709 return (FC_INVALID_REQUEST);
2710 2710 }
2711 2711 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2712 2712
2713 2713 /* Wait for suspension to end. */
2714 2714 TASK_DAEMON_LOCK(ha);
2715 2715 while (ha->task_daemon_flags & QL_SUSPENDED) {
2716 2716 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
2717 2717
2718 2718 /* 30 seconds from now */
2719 2719 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
2720 2720 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
2721 2721 /*
2722 2722 * The timeout time 'timer' was
2723 2723 * reached without the condition
2724 2724 * being signaled.
2725 2725 */
2726 2726 pkt->pkt_state = FC_PKT_TRAN_BSY;
2727 2727 pkt->pkt_reason = FC_REASON_XCHG_BSY;
2728 2728
2729 2729 /* Release task daemon lock. */
2730 2730 TASK_DAEMON_UNLOCK(ha);
2731 2731
2732 2732 EL(ha, "QL_SUSPENDED failed=%xh\n",
2733 2733 QL_FUNCTION_TIMEOUT);
2734 2734 return (FC_TRAN_BUSY);
2735 2735 }
2736 2736 }
2737 2737 /* Release task daemon lock. */
2738 2738 TASK_DAEMON_UNLOCK(ha);
2739 2739
2740 2740 /* Setup response header. */
2741 2741 bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
2742 2742 sizeof (fc_frame_hdr_t));
2743 2743
2744 2744 if (pkt->pkt_rsplen) {
2745 2745 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
2746 2746 }
2747 2747
2748 2748 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
2749 2749 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
2750 2750 pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
2751 2751 R_CTL_SOLICITED_CONTROL;
2752 2752 pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
2753 2753 F_CTL_END_SEQ;
2754 2754
2755 2755 sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
2756 2756 SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
2757 2757 SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);
2758 2758
2759 2759 sp->flags |= SRB_ELS_PKT;
2760 2760
2761 2761 /* map the type of ELS to a function */
2762 2762 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
2763 2763 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
2764 2764
2765 2765 #if 0
2766 2766 QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
2767 2767 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
2768 2768 sizeof (fc_frame_hdr_t) / 4);
2769 2769 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
2770 2770 QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
2771 2771 #endif
2772 2772
2773 2773 sp->iocb = ha->els_cmd;
2774 2774 sp->req_cnt = 1;
2775 2775
2776 2776 switch (els.ls_code) {
2777 2777 case LA_ELS_RJT:
2778 2778 case LA_ELS_ACC:
2779 2779 EL(ha, "LA_ELS_RJT\n");
2780 2780 pkt->pkt_state = FC_PKT_SUCCESS;
2781 2781 rval = FC_SUCCESS;
2782 2782 break;
2783 2783 case LA_ELS_PLOGI:
2784 2784 case LA_ELS_PDISC:
2785 2785 rval = ql_els_plogi(ha, pkt);
2786 2786 break;
2787 2787 case LA_ELS_FLOGI:
2788 2788 case LA_ELS_FDISC:
2789 2789 rval = ql_els_flogi(ha, pkt);
2790 2790 break;
2791 2791 case LA_ELS_LOGO:
2792 2792 rval = ql_els_logo(ha, pkt);
2793 2793 break;
2794 2794 case LA_ELS_PRLI:
2795 2795 rval = ql_els_prli(ha, pkt);
2796 2796 break;
2797 2797 case LA_ELS_PRLO:
2798 2798 rval = ql_els_prlo(ha, pkt);
2799 2799 break;
2800 2800 case LA_ELS_ADISC:
2801 2801 rval = ql_els_adisc(ha, pkt);
2802 2802 break;
2803 2803 case LA_ELS_LINIT:
2804 2804 rval = ql_els_linit(ha, pkt);
2805 2805 break;
2806 2806 case LA_ELS_LPC:
2807 2807 rval = ql_els_lpc(ha, pkt);
2808 2808 break;
2809 2809 case LA_ELS_LSTS:
2810 2810 rval = ql_els_lsts(ha, pkt);
2811 2811 break;
2812 2812 case LA_ELS_SCR:
2813 2813 rval = ql_els_scr(ha, pkt);
2814 2814 break;
2815 2815 case LA_ELS_RSCN:
2816 2816 rval = ql_els_rscn(ha, pkt);
2817 2817 break;
2818 2818 case LA_ELS_FARP_REQ:
2819 2819 rval = ql_els_farp_req(ha, pkt);
2820 2820 break;
2821 2821 case LA_ELS_FARP_REPLY:
2822 2822 rval = ql_els_farp_reply(ha, pkt);
2823 2823 break;
2824 2824 case LA_ELS_RLS:
2825 2825 rval = ql_els_rls(ha, pkt);
2826 2826 break;
2827 2827 case LA_ELS_RNID:
2828 2828 rval = ql_els_rnid(ha, pkt);
2829 2829 break;
2830 2830 default:
2831 2831 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
2832 2832 els.ls_code);
2833 2833 /* Build RJT. */
2834 2834 bzero(&rjt, sizeof (rjt));
2835 2835 rjt.ls_code.ls_code = LA_ELS_RJT;
2836 2836 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
2837 2837
2838 2838 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
2839 2839 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
2840 2840
2841 2841 pkt->pkt_state = FC_PKT_LOCAL_RJT;
2842 2842 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
2843 2843 rval = FC_SUCCESS;
2844 2844 break;
2845 2845 }
2846 2846
2847 2847 #if 0
2848 2848 QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
2849 2849 QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
2850 2850 sizeof (fc_frame_hdr_t) / 4);
2851 2851 #endif
2852 2852 /*
2853 2853 * Return success if the srb was consumed by an iocb. The packet
2854 2854 * completion callback will be invoked by the response handler.
2855 2855 */
2856 2856 if (rval == QL_CONSUMED) {
2857 2857 rval = FC_SUCCESS;
2858 2858 } else if (rval == FC_SUCCESS &&
2859 2859 !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
2860 2860 /* Do command callback only if no error */
2861 2861 ql_awaken_task_daemon(ha, sp, 0, 0);
2862 2862 }
2863 2863
2864 2864 if (rval != FC_SUCCESS) {
2865 2865 EL(ha, "failed, rval = %xh\n", rval);
2866 2866 } else {
2867 2867 /*EMPTY*/
2868 2868 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2869 2869 }
2870 2870 return (rval);
2871 2871 }
2872 2872
2873 2873 /*
2874 2874 * ql_get_cap
2875 2875 * Export FCA hardware and software capabilities.
2876 2876 *
2877 2877 * Input:
2878 2878 * fca_handle = handle setup by ql_bind_port().
2879 2879 * cap = pointer to the capabilities string.
2880 2880 * ptr = buffer pointer for return capability.
2881 2881 *
2882 2882 * Returns:
2883 2883 * FC_CAP_ERROR - no such capability
2884 2884 * FC_CAP_FOUND - the capability was returned and cannot be set
2885 2885 * FC_CAP_SETTABLE - the capability was returned and can be set
2886 2886 * FC_UNBOUND - the fca_handle specified is not bound.
2887 2887 *
2888 2888 * Context:
2889 2889 * Kernel context.
2890 2890 */
static int
ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
{
	ql_adapter_state_t	*ha;
	int			rval;
	uint32_t		*rptr = (uint32_t *)ptr;

	/* Validate the port handle before touching adapter state. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		/* Copy the 8-byte node WWN out of the login parameters. */
		bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
		    ptr, 8);
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		/* Return the complete ELS login payload. */
		bcopy((void *)&ha->loginparams, ptr,
		    sizeof (la_els_logi_t));
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		/* Maximum unsolicited buffers this FCA will allocate. */
		*rptr = (uint32_t)QL_UB_LIMIT;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {

		/* Non-NULL only when a Psycho bridge is found below. */
		dev_info_t	*psydip = NULL;
#ifdef __sparc
		/*
		 * Disable streaming for certain 2 chip adapters
		 * below Psycho to handle Psycho byte hole issue.
		 */
		if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) &&
		    (!CFG_IST(ha, CFG_SBUS_CARD))) {
			/* Walk up the devinfo tree looking for "pcipsy". */
			for (psydip = ddi_get_parent(ha->dip); psydip;
			    psydip = ddi_get_parent(psydip)) {
				if (strcmp(ddi_driver_name(psydip),
				    "pcipsy") == 0) {
					break;
				}
			}
		}
#endif	/* __sparc */

		if (psydip) {
			*rptr = (uint32_t)FC_NO_STREAMING;
			EL(ha, "No Streaming\n");
		} else {
			*rptr = (uint32_t)FC_ALLOW_STREAMING;
			EL(ha, "Allow Streaming\n");
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		/*
		 * Max frame length lives in a chip-generation specific
		 * init control block layout (24xx+ vs. older ISPs).
		 */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb24.max_frame_length[0],
			    ha->init_ctrl_blk.cb24.max_frame_length[1]);
		} else {
			*rptr = (uint32_t)CHAR_TO_SHORT(
			    ha->init_ctrl_blk.cb.max_frame_length[0],
			    ha->init_ctrl_blk.cb.max_frame_length[1]);
		}
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		*rptr = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;
	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		*rptr = FC_NO_DVMA_SPACE;
		rval = FC_CAP_FOUND;
	} else {
		EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
		rval = FC_CAP_ERROR;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2971 2971
2972 2972 /*
2973 2973 * ql_set_cap
2974 2974 * Allow the FC Transport to set FCA capabilities if possible.
2975 2975 *
2976 2976 * Input:
2977 2977 * fca_handle = handle setup by ql_bind_port().
2978 2978 * cap = pointer to the capabilities string.
2979 2979 * ptr = buffer pointer for capability.
2980 2980 *
2981 2981 * Returns:
2982 2982 * FC_CAP_ERROR - no such capability
2983 2983 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2984 2984 * FC_CAP_SETTABLE - the capability was successfully set.
2985 2985 * FC_UNBOUND - the fca_handle specified is not bound.
2986 2986 *
2987 2987 * Context:
2988 2988 * Kernel context.
2989 2989 */
2990 2990 /* ARGSUSED */
2991 2991 static int
2992 2992 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2993 2993 {
2994 2994 ql_adapter_state_t *ha;
2995 2995 int rval;
2996 2996
2997 2997 ha = ql_fca_handle_to_state(fca_handle);
2998 2998 if (ha == NULL) {
2999 2999 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3000 3000 (void *)fca_handle);
3001 3001 return (FC_UNBOUND);
3002 3002 }
3003 3003 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3004 3004
3005 3005 if (strcmp(cap, FC_NODE_WWN) == 0) {
3006 3006 rval = FC_CAP_FOUND;
3007 3007 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
3008 3008 rval = FC_CAP_FOUND;
3009 3009 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
3010 3010 rval = FC_CAP_FOUND;
3011 3011 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
3012 3012 rval = FC_CAP_FOUND;
3013 3013 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
3014 3014 rval = FC_CAP_FOUND;
3015 3015 } else {
3016 3016 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
3017 3017 rval = FC_CAP_ERROR;
3018 3018 }
3019 3019
3020 3020 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3021 3021
3022 3022 return (rval);
3023 3023 }
3024 3024
3025 3025 /*
3026 3026 * ql_getmap
3027 3027 * Request of Arbitrated Loop (AL-PA) map.
3028 3028 *
3029 3029 * Input:
3030 3030 * fca_handle = handle setup by ql_bind_port().
3031 3031 * mapbuf= buffer pointer for map.
3032 3032 *
3033 3033 * Returns:
3034 3034 * FC_OLDPORT - the specified port is not operating in loop mode.
3035 3035 * FC_OFFLINE - the specified port is not online.
3036 3036 * FC_NOMAP - there is no loop map available for this port.
3037 3037 * FC_UNBOUND - the fca_handle specified is not bound.
3038 3038 * FC_SUCCESS - a valid map has been placed in mapbuf.
3039 3039 *
3040 3040 * Context:
3041 3041 * Kernel context.
3042 3042 */
3043 3043 static int
3044 3044 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
3045 3045 {
3046 3046 ql_adapter_state_t *ha;
3047 3047 clock_t timer = drv_usectohz(30000000);
3048 3048 int rval = FC_SUCCESS;
3049 3049
3050 3050 ha = ql_fca_handle_to_state(fca_handle);
3051 3051 if (ha == NULL) {
3052 3052 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3053 3053 (void *)fca_handle);
3054 3054 return (FC_UNBOUND);
3055 3055 }
3056 3056 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3057 3057
3058 3058 mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
3059 3059 mapbuf->lilp_myalpa = ha->d_id.b.al_pa;
3060 3060
3061 3061 /* Wait for suspension to end. */
3062 3062 TASK_DAEMON_LOCK(ha);
3063 3063 while (ha->task_daemon_flags & QL_SUSPENDED) {
3064 3064 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
3065 3065
3066 3066 /* 30 seconds from now */
3067 3067 if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
3068 3068 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
3069 3069 /*
3070 3070 * The timeout time 'timer' was
3071 3071 * reached without the condition
3072 3072 * being signaled.
3073 3073 */
3074 3074
3075 3075 /* Release task daemon lock. */
3076 3076 TASK_DAEMON_UNLOCK(ha);
3077 3077
3078 3078 EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
3079 3079 return (FC_TRAN_BUSY);
3080 3080 }
3081 3081 }
3082 3082 /* Release task daemon lock. */
3083 3083 TASK_DAEMON_UNLOCK(ha);
3084 3084
3085 3085 if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
3086 3086 (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
3087 3087 /*
3088 3088 * Now, since transport drivers cosider this as an
3089 3089 * offline condition, let's wait for few seconds
3090 3090 * for any loop transitions before we reset the.
3091 3091 * chip and restart all over again.
3092 3092 */
3093 3093 ql_delay(ha, 2000000);
3094 3094 EL(ha, "failed, FC_NOMAP\n");
3095 3095 rval = FC_NOMAP;
3096 3096 } else {
3097 3097 /*EMPTY*/
3098 3098 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
3099 3099 "data %xh %xh %xh %xh\n", ha->instance,
3100 3100 mapbuf->lilp_myalpa, mapbuf->lilp_length,
3101 3101 mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
3102 3102 mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
3103 3103 }
3104 3104
3105 3105 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3106 3106 #if 0
3107 3107 QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
3108 3108 #endif
3109 3109 return (rval);
3110 3110 }
3111 3111
3112 3112 /*
3113 3113 * ql_transport
3114 3114 * Issue an I/O request. Handles all regular requests.
3115 3115 *
3116 3116 * Input:
3117 3117 * fca_handle = handle setup by ql_bind_port().
3118 3118 * pkt = pointer to fc_packet.
3119 3119 *
3120 3120 * Returns:
3121 3121 * FC_SUCCESS - the packet was accepted for transport.
3122 3122 * FC_TRANSPORT_ERROR - a transport error occurred.
3123 3123 * FC_BADPACKET - the packet to be transported had not been
3124 3124 * initialized by this FCA.
3125 3125 * FC_UNBOUND - the fca_handle specified is not bound.
3126 3126 *
3127 3127 * Context:
3128 3128 * Kernel context.
3129 3129 */
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-fill the response frame header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the command frame's routing control field. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		/*
		 * NOTE(review): a R_CTL_COMMAND frame with a non-FCP type
		 * falls out of this switch leaving rval at
		 * FC_TRANSPORT_ERROR without setting pkt_state/pkt_reason;
		 * confirm whether that is intentional.
		 */
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP over FC traffic. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic (name/management server) services. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Anything else is rejected locally. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3217 3217
3218 3218 /*
3219 3219 * ql_ub_alloc
3220 3220 * Allocate buffers for unsolicited exchanges.
3221 3221 *
3222 3222 * Input:
3223 3223 * fca_handle = handle setup by ql_bind_port().
3224 3224 * tokens = token array for each buffer.
3225 3225 * size = size of each buffer.
3226 3226 * count = pointer to number of buffers.
3227 3227 * type = the FC-4 type the buffers are reserved for.
3228 3228 * 1 = Extended Link Services, 5 = LLC/SNAP
3229 3229 *
3230 3230 * Returns:
3231 3231 * FC_FAILURE - buffers could not be allocated.
3232 3232 * FC_TOOMANY - the FCA could not allocate the requested
3233 3233 * number of buffers.
3234 3234 * FC_SUCCESS - unsolicited buffers were allocated.
3235 3235 * FC_UNBOUND - the fca_handle specified is not bound.
3236 3236 *
3237 3237 * Context:
3238 3238 * Kernel context.
3239 3239 */
static int
ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	ql_adapter_state_t	*ha;
	caddr_t			bufp = NULL;
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	uint32_t		index;
	uint32_t		cnt;
	uint32_t		ub_array_index = 0;
	int			rval = FC_SUCCESS;
	int			ub_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
	    ha->instance, ha->vp_index, *count);

	/* Refuse allocation while the adapter is powered down. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
		    ha->vp_index);
		return (FC_FAILURE);
	}
	QL_PM_UNLOCK(ha);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check the count. */
	if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
		*count = 0;
		EL(ha, "failed, FC_TOOMANY\n");
		rval = FC_TOOMANY;
	}

	/*
	 * reset ub_array_index
	 */
	ub_array_index = 0;

	/*
	 * Now proceed to allocate any buffers required
	 */
	for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
		/* Allocate all memory needed. */
		ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
		    KM_SLEEP);
		/*
		 * NOTE(review): kmem_zalloc(KM_SLEEP) sleeps rather than
		 * returning NULL, so these NULL checks appear to be
		 * unreachable defensive code — confirm against kmem(9F).
		 */
		if (ubp == NULL) {
			EL(ha, "failed, FC_FAILURE\n");
			rval = FC_FAILURE;
		} else {
			sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
			if (sp == NULL) {
				kmem_free(ubp, sizeof (fc_unsol_buf_t));
				rval = FC_FAILURE;
			} else {
				if (type == FC_TYPE_IS8802_SNAP) {
					/*
					 * IP receive buffers need DMA-able
					 * memory; endianness of the mapping
					 * differs per platform.
					 */
#ifdef	__sparc
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    BIG_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#else
					if (ql_get_dma_mem(ha,
					    &sp->ub_buffer, size,
					    LITTLE_ENDIAN_DMA,
					    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						bufp = sp->ub_buffer.bp;
						sp->ub_size = size;
					}
#endif
				} else {
					/* Non-IP buffers use plain kmem. */
					bufp = kmem_zalloc(size, KM_SLEEP);
					if (bufp == NULL) {
						rval = FC_FAILURE;
						kmem_free(ubp,
						    sizeof (fc_unsol_buf_t));
						kmem_free(sp,
						    sizeof (ql_srb_t));
					} else {
						sp->ub_size = size;
					}
				}
			}
		}

		if (rval == FC_SUCCESS) {
			/* Find next available slot. */
			QL_UB_LOCK(ha);
			while (ha->ub_array[ub_array_index] != NULL) {
				ub_array_index++;
			}

			ubp->ub_fca_private = (void *)sp;

			/* init cmd links */
			sp->cmd.base_address = sp;
			sp->cmd.prev = NULL;
			sp->cmd.next = NULL;
			sp->cmd.head = NULL;

			/* init wdg links */
			sp->wdg.base_address = sp;
			sp->wdg.prev = NULL;
			sp->wdg.next = NULL;
			sp->wdg.head = NULL;
			sp->ha = ha;

			ubp->ub_buffer = bufp;
			ubp->ub_bufsize = size;
			ubp->ub_port_handle = fca_handle;
			ubp->ub_token = ub_array_index;

			/* Save the token. */
			tokens[index] = ub_array_index;

			/* Setup FCA private information. */
			sp->ub_type = type;
			sp->handle = ub_array_index;
			sp->flags |= SRB_UB_IN_FCA;

			ha->ub_array[ub_array_index] = ubp;
			ha->ub_allocated++;
			ub_updated = TRUE;
			QL_UB_UNLOCK(ha);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/* IP buffer. */
	if (ub_updated) {
		if ((type == FC_TYPE_IS8802_SNAP) &&
		    (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {

			ADAPTER_STATE_LOCK(ha);
			ha->flags |= IP_ENABLED;
			ADAPTER_STATE_UNLOCK(ha);

			if (!(ha->flags & IP_INITIALIZED)) {
				/*
				 * First-time IP setup: program MTU, buffer
				 * size and container count into the IP init
				 * control block, layout depends on chip.
				 */
				if (CFG_IST(ha, CFG_CTRL_2422)) {
					ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb24.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb24.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb24.cc[0],
					    ha->ip_init_ctrl_blk.cb24.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb24.cc[0]
						    = LSB(*count);
						ha->ip_init_ctrl_blk.cb24.cc[1]
						    = MSB(*count);
					}
				} else {
					ha->ip_init_ctrl_blk.cb.mtu_size[0] =
					    LSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.mtu_size[1] =
					    MSB(ql_ip_mtu);
					ha->ip_init_ctrl_blk.cb.buf_size[0] =
					    LSB(size);
					ha->ip_init_ctrl_blk.cb.buf_size[1] =
					    MSB(size);

					cnt = CHAR_TO_SHORT(
					    ha->ip_init_ctrl_blk.cb.cc[0],
					    ha->ip_init_ctrl_blk.cb.cc[1]);

					if (cnt < *count) {
						ha->ip_init_ctrl_blk.cb.cc[0] =
						    LSB(*count);
						ha->ip_init_ctrl_blk.cb.cc[1] =
						    MSB(*count);
					}
				}

				(void) ql_initialize_ip(ha);
			}
			/* Hand the new receive buffers to the ISP. */
			ql_isp_rcvbuf(ha);
		}
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}
	return (rval);
}
3461 3461
3462 3462 /*
3463 3463 * ql_ub_free
3464 3464 * Free unsolicited buffers.
3465 3465 *
3466 3466 * Input:
3467 3467 * fca_handle = handle setup by ql_bind_port().
3468 3468 * count = number of buffers.
3469 3469 * tokens = token array for each buffer.
3470 3470 *
3471 3471 * Returns:
3472 3472 * FC_SUCCESS - the requested buffers have been freed.
3473 3473 * FC_UNBOUND - the fca_handle specified is not bound.
3474 3474 * FC_UB_BADTOKEN - an invalid token was encountered.
3475 3475 * No buffers have been released.
3476 3476 *
3477 3477 * Context:
3478 3478 * Kernel context.
3479 3479 */
static int
ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Check all returned tokens. */
	for (index = 0; index < count; index++) {
		fc_unsol_buf_t	*ubp;

		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		QL_UB_LOCK(ha);
		ubp = ha->ub_array[ub_array_index];

		if (ubp == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			QL_UB_UNLOCK(ha);
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		sp->flags |= SRB_UB_FREE_REQUESTED;

		/*
		 * Poll until the buffer is back in the FCA and not busy.
		 * Both locks are dropped around the delay so other paths
		 * can return the buffer.
		 */
		while (!(sp->flags & SRB_UB_IN_FCA) ||
		    (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
			QL_UB_UNLOCK(ha);
			ADAPTER_STATE_UNLOCK(ha);
			delay(drv_usectohz(100000));
			ADAPTER_STATE_LOCK(ha);
			QL_UB_LOCK(ha);
		}
		ha->ub_array[ub_array_index] = NULL;
		QL_UB_UNLOCK(ha);
		ql_free_unsolicited_buffer(ha, ubp);
	}

	if (rval == FC_SUCCESS) {
		/*
		 * Signal any pending hardware reset when there are
		 * no more unsolicited buffers in use.
		 */
		if (ha->ub_allocated == 0) {
			cv_broadcast(&ha->pha->cv_ub);
		}
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3561 3561
3562 3562 /*
3563 3563 * ql_ub_release
3564 3564 * Release unsolicited buffers from FC Transport
3565 3565 * to FCA for future use.
3566 3566 *
3567 3567 * Input:
3568 3568 * fca_handle = handle setup by ql_bind_port().
3569 3569 * count = number of buffers.
3570 3570 * tokens = token array for each buffer.
3571 3571 *
3572 3572 * Returns:
3573 3573 * FC_SUCCESS - the requested buffers have been released.
3574 3574 * FC_UNBOUND - the fca_handle specified is not bound.
3575 3575 * FC_UB_BADTOKEN - an invalid token was encountered.
3576 3576 * No buffers have been released.
3577 3577 *
3578 3578 * Context:
3579 3579 * Kernel context.
3580 3580 */
static int
ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
{
	ql_adapter_state_t	*ha;
	ql_srb_t		*sp;
	uint32_t		index;
	uint64_t		ub_array_index;
	int			rval = FC_SUCCESS;
	int			ub_ip_updated = FALSE;

	/* Check handle. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	QL_UB_LOCK(ha);

	/*
	 * First pass: validate every token before touching any buffer,
	 * so either all buffers are released or none are.
	 */
	for (index = 0; index < count; index++) {
		/* Check the token range. */
		if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
			EL(ha, "failed, FC_UB_BADTOKEN\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the unsolicited buffer array. */
		if (ha->ub_array[ub_array_index] == NULL) {
			EL(ha, "failed, FC_UB_BADTOKEN-2\n");
			rval = FC_UB_BADTOKEN;
			break;
		}

		/* Check the state of the unsolicited buffer. */
		sp = ha->ub_array[ub_array_index]->ub_fca_private;
		if (sp->flags & SRB_UB_IN_FCA) {
			/* Already owned by the FCA; transport can't return it. */
			EL(ha, "failed, FC_UB_BADTOKEN-3\n");
			rval = FC_UB_BADTOKEN;
			break;
		}
	}

	/* If all tokens checkout, release the buffers. */
	if (rval == FC_SUCCESS) {
		/* Second pass: mark each buffer as back in the FCA. */
		for (index = 0; index < count; index++) {
			fc_unsol_buf_t	*ubp;

			ub_array_index = tokens[index];
			ubp = ha->ub_array[ub_array_index];
			sp = ubp->ub_fca_private;

			ubp->ub_resp_flags = 0;
			sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
			sp->flags |= SRB_UB_IN_FCA;

			/* IP buffer. */
			if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
				ub_ip_updated = TRUE;
			}
		}
	}

	QL_UB_UNLOCK(ha);
	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	/*
	 * XXX: We should call ql_isp_rcvbuf() to return a
	 * buffer to ISP only if the number of buffers fall below
	 * the low water mark.
	 */
	if (ub_ip_updated) {
		ql_isp_rcvbuf(ha);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
3671 3671
3672 3672 /*
3673 3673 * ql_abort
3674 3674 * Abort a packet.
3675 3675 *
3676 3676 * Input:
3677 3677 * fca_handle = handle setup by ql_bind_port().
3678 3678 * pkt = pointer to fc_packet.
3679 3679 * flags = KM_SLEEP flag.
3680 3680 *
3681 3681 * Returns:
3682 3682 * FC_SUCCESS - the packet has successfully aborted.
3683 3683 * FC_ABORTED - the packet has successfully aborted.
3684 3684 * FC_ABORTING - the packet is being aborted.
3685 3685 * FC_ABORT_FAILED - the packet could not be aborted.
3686 3686 * FC_TRANSPORT_ERROR - a transport error occurred while attempting
3687 3687 * to abort the packet.
3688 3688 * FC_BADEXCHANGE - no packet found.
3689 3689 * FC_UNBOUND - the fca_handle specified is not bound.
3690 3690 *
3691 3691 * Context:
3692 3692 * Kernel context.
3693 3693 */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/*
	 * No target queue means we never knew this D_ID; a down loop
	 * means the abort cannot be delivered. Bail out early either way.
	 */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	/* KM_SLEEP from the caller means "wait for the abort to finish". */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/* Acquire target queue lock. */
	/* Lock order: device queue first, then request ring. */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Check pending queue for command. */
		/*
		 * Not yet handed to the ISP: the srb is either on the
		 * adapter-wide pending queue or on the LUN queue. Find
		 * it, unlink it, and complete it with CS_ABORTED.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Release device queue lock. */
		/* Already completed by firmware - nothing left to abort. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command has been started but not completed: invalidate
		 * its entry on the request ring (if still queued there),
		 * then ask the firmware to abort it.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3839 3839
3840 3840 /*
3841 3841 * ql_reset
3842 3842 * Reset link or hardware.
3843 3843 *
3844 3844 * Input:
3845 3845 * fca_handle = handle setup by ql_bind_port().
3846 3846 * cmd = reset type command.
3847 3847 *
3848 3848 * Returns:
3849 3849 * FC_SUCCESS - reset has successfully finished.
3850 3850 * FC_UNBOUND - the fca_handle specified is not bound.
3851 3851 * FC_FAILURE - reset failed.
3852 3852 *
3853 3853 * Context:
3854 3854 * Kernel context.
3855 3855 */
static int
ql_reset(opaque_t fca_handle, uint32_t cmd)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_SUCCESS, rval2;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
	    ha->vp_index, cmd);

	switch (cmd) {
	case FC_FCA_CORE:
		/* dump firmware core if specified. */
		/* Core dumps are only taken on the physical port (vp 0). */
		if (ha->vp_index == 0) {
			if (ql_dump_firmware(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_LINK_RESET:
		/* Only reset the link when it is currently up. */
		if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
			if (ql_loop_reset(ha) != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-2\n");
				rval = FC_FAILURE;
			}
		}
		break;
	case FC_FCA_RESET_CORE:
	case FC_FCA_RESET:
		/* if dump firmware core if specified. */
		if (cmd == FC_FCA_RESET_CORE) {
			/*
			 * Virtual ports cannot dump firmware; the closest
			 * equivalent is a loop reset (skipped if the loop
			 * is already down).
			 */
			if (ha->vp_index != 0) {
				rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
				    ? QL_SUCCESS : ql_loop_reset(ha);
			} else {
				rval2 = ql_dump_firmware(ha);
			}
			if (rval2 != QL_SUCCESS) {
				EL(ha, "failed, FC_FAILURE-3\n");
				rval = FC_FAILURE;
			}
		}

		/* Free up all unsolicited buffers. */
		if (ha->ub_allocated != 0) {
			/* Inform to release buffers. */
			/*
			 * Tell the port driver (via its state-change
			 * callback) that a reset was requested so it
			 * releases its unsolicited buffers.
			 */
			ha->state = FC_PORT_SPEED_MASK(ha->state);
			ha->state |= FC_STATE_RESET_REQUESTED;
			if (ha->flags & FCA_BOUND) {
				(ha->bind_info.port_statec_cb)
				    (ha->bind_info.port_handle,
				    ha->state);
			}
		}

		/* Keep only the speed bits; state bits are rebuilt below. */
		ha->state = FC_PORT_SPEED_MASK(ha->state);

		/* All buffers freed */
		if (ha->ub_allocated == 0) {
			/* Hardware reset. */
			if (cmd == FC_FCA_RESET) {
				if (ha->vp_index == 0) {
					(void) ql_abort_isp(ha);
				} else if (!(ha->pha->task_daemon_flags &
				    LOOP_DOWN)) {
					(void) ql_loop_reset(ha);
				}
			}

			/* Inform that the hardware has been reset */
			ha->state |= FC_STATE_RESET;
		} else {
			/*
			 * the port driver expects an online if
			 * buffers are not freed.
			 */
			if (ha->topology & QL_LOOP_CONNECTION) {
				ha->state |= FC_STATE_LOOP;
			} else {
				ha->state |= FC_STATE_ONLINE;
			}
		}

		/* Let the task daemon propagate the new state. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags |= FC_STATE_CHANGE;
		TASK_DAEMON_UNLOCK(ha);

		ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);

		break;
	default:
		EL(ha, "unknown cmd=%xh\n", cmd);
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
3968 3968
3969 3969 /*
3970 3970 * ql_port_manage
3971 3971 * Perform port management or diagnostics.
3972 3972 *
3973 3973 * Input:
3974 3974 * fca_handle = handle setup by ql_bind_port().
3975 3975 * cmd = pointer to command structure.
3976 3976 *
3977 3977 * Returns:
3978 3978 * FC_SUCCESS - the request completed successfully.
3979 3979 * FC_FAILURE - the request did not complete successfully.
3980 3980 * FC_UNBOUND - the fca_handle specified is not bound.
3981 3981 *
3982 3982 * Context:
3983 3983 * Kernel context.
3984 3984 */
3985 3985 static int
3986 3986 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3987 3987 {
3988 3988 clock_t timer;
3989 3989 uint16_t index;
3990 3990 uint32_t *bp;
3991 3991 port_id_t d_id;
3992 3992 ql_link_t *link;
3993 3993 ql_adapter_state_t *ha, *pha;
3994 3994 ql_tgt_t *tq;
3995 3995 dma_mem_t buffer_xmt, buffer_rcv;
3996 3996 size_t length;
3997 3997 uint32_t cnt;
3998 3998 char buf[80];
3999 3999 lbp_t *lb;
4000 4000 ql_mbx_data_t mr;
4001 4001 app_mbx_cmd_t *mcp;
4002 4002 int i0;
4003 4003 uint8_t *bptr;
4004 4004 int rval2, rval = FC_SUCCESS;
4005 4005 uint32_t opcode;
4006 4006 uint32_t set_flags = 0;
4007 4007
4008 4008 ha = ql_fca_handle_to_state(fca_handle);
4009 4009 if (ha == NULL) {
4010 4010 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
4011 4011 (void *)fca_handle);
4012 4012 return (FC_UNBOUND);
4013 4013 }
4014 4014 pha = ha->pha;
4015 4015
4016 4016 QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
4017 4017 cmd->pm_cmd_code);
4018 4018
4019 4019 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
4020 4020
4021 4021 /*
4022 4022 * Wait for all outstanding commands to complete
4023 4023 */
4024 4024 index = (uint16_t)ql_wait_outstanding(ha);
4025 4025
4026 4026 if (index != MAX_OUTSTANDING_COMMANDS) {
4027 4027 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
4028 4028 ql_restart_queues(ha);
4029 4029 EL(ha, "failed, FC_TRAN_BUSY\n");
4030 4030 return (FC_TRAN_BUSY);
4031 4031 }
4032 4032
4033 4033 switch (cmd->pm_cmd_code) {
4034 4034 case FC_PORT_BYPASS:
4035 4035 d_id.b24 = *cmd->pm_cmd_buf;
4036 4036 tq = ql_d_id_to_queue(ha, d_id);
4037 4037 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
4038 4038 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
4039 4039 rval = FC_FAILURE;
4040 4040 }
4041 4041 break;
4042 4042 case FC_PORT_UNBYPASS:
4043 4043 d_id.b24 = *cmd->pm_cmd_buf;
4044 4044 tq = ql_d_id_to_queue(ha, d_id);
4045 4045 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
4046 4046 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
4047 4047 rval = FC_FAILURE;
4048 4048 }
4049 4049 break;
4050 4050 case FC_PORT_GET_FW_REV:
4051 4051 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4052 4052 pha->fw_minor_version, pha->fw_subminor_version);
4053 4053 length = strlen(buf) + 1;
4054 4054 if (cmd->pm_data_len < length) {
4055 4055 cmd->pm_data_len = length;
4056 4056 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4057 4057 rval = FC_FAILURE;
4058 4058 } else {
4059 4059 (void) strcpy(cmd->pm_data_buf, buf);
4060 4060 }
4061 4061 break;
4062 4062
4063 4063 case FC_PORT_GET_FCODE_REV: {
4064 4064 caddr_t fcode_ver_buf = NULL;
4065 4065
4066 4066 i0 = 0;
4067 4067 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4068 4068 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4069 4069 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4070 4070 (caddr_t)&fcode_ver_buf, &i0);
4071 4071 length = (uint_t)i0;
4072 4072
4073 4073 if (rval2 != DDI_PROP_SUCCESS) {
4074 4074 EL(ha, "failed, getting version = %xh\n", rval2);
4075 4075 length = 20;
4076 4076 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4077 4077 if (fcode_ver_buf != NULL) {
4078 4078 (void) sprintf(fcode_ver_buf,
4079 4079 "NO FCODE FOUND");
4080 4080 }
4081 4081 }
4082 4082
4083 4083 if (cmd->pm_data_len < length) {
4084 4084 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4085 4085 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4086 4086 cmd->pm_data_len = length;
4087 4087 rval = FC_FAILURE;
4088 4088 } else if (fcode_ver_buf != NULL) {
4089 4089 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4090 4090 length);
4091 4091 }
4092 4092
4093 4093 if (fcode_ver_buf != NULL) {
4094 4094 kmem_free(fcode_ver_buf, length);
4095 4095 }
4096 4096 break;
4097 4097 }
4098 4098
4099 4099 case FC_PORT_GET_DUMP:
4100 4100 QL_DUMP_LOCK(pha);
4101 4101 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4102 4102 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4103 4103 "length=%lxh\n", cmd->pm_data_len);
4104 4104 cmd->pm_data_len = pha->risc_dump_size;
4105 4105 rval = FC_FAILURE;
4106 4106 } else if (pha->ql_dump_state & QL_DUMPING) {
4107 4107 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4108 4108 rval = FC_TRAN_BUSY;
4109 4109 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4110 4110 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4111 4111 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4112 4112 } else {
4113 4113 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4114 4114 rval = FC_FAILURE;
4115 4115 }
4116 4116 QL_DUMP_UNLOCK(pha);
4117 4117 break;
4118 4118 case FC_PORT_FORCE_DUMP:
4119 4119 PORTMANAGE_LOCK(ha);
4120 4120 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4121 4121 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4122 4122 rval = FC_FAILURE;
4123 4123 }
4124 4124 PORTMANAGE_UNLOCK(ha);
4125 4125 break;
4126 4126 case FC_PORT_DOWNLOAD_FW:
4127 4127 PORTMANAGE_LOCK(ha);
4128 4128 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4129 4129 if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4130 4130 (uint32_t)cmd->pm_data_len,
4131 4131 ha->flash_fw_addr << 2) != QL_SUCCESS) {
4132 4132 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4133 4133 rval = FC_FAILURE;
4134 4134 }
4135 4135 ql_reset_chip(ha);
4136 4136 set_flags |= ISP_ABORT_NEEDED;
4137 4137 } else {
4138 4138 /* Save copy of the firmware. */
4139 4139 if (pha->risc_code != NULL) {
4140 4140 kmem_free(pha->risc_code, pha->risc_code_size);
4141 4141 pha->risc_code = NULL;
4142 4142 pha->risc_code_size = 0;
4143 4143 }
4144 4144
4145 4145 pha->risc_code = kmem_alloc(cmd->pm_data_len,
4146 4146 KM_SLEEP);
4147 4147 if (pha->risc_code != NULL) {
4148 4148 pha->risc_code_size =
4149 4149 (uint32_t)cmd->pm_data_len;
4150 4150 bcopy(cmd->pm_data_buf, pha->risc_code,
4151 4151 cmd->pm_data_len);
4152 4152
4153 4153 /* Do abort to force reload. */
4154 4154 ql_reset_chip(ha);
4155 4155 if (ql_abort_isp(ha) != QL_SUCCESS) {
4156 4156 kmem_free(pha->risc_code,
4157 4157 pha->risc_code_size);
4158 4158 pha->risc_code = NULL;
4159 4159 pha->risc_code_size = 0;
4160 4160 ql_reset_chip(ha);
4161 4161 (void) ql_abort_isp(ha);
4162 4162 EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4163 4163 " FC_FAILURE\n");
4164 4164 rval = FC_FAILURE;
4165 4165 }
4166 4166 }
4167 4167 }
4168 4168 PORTMANAGE_UNLOCK(ha);
4169 4169 break;
4170 4170 case FC_PORT_GET_DUMP_SIZE:
4171 4171 bp = (uint32_t *)cmd->pm_data_buf;
4172 4172 *bp = pha->risc_dump_size;
4173 4173 break;
4174 4174 case FC_PORT_DIAG:
4175 4175 /*
4176 4176 * Prevents concurrent diags
4177 4177 */
4178 4178 PORTMANAGE_LOCK(ha);
4179 4179
4180 4180 /* Wait for suspension to end. */
4181 4181 for (timer = 0; timer < 3000 &&
4182 4182 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4183 4183 ql_delay(ha, 10000);
4184 4184 }
4185 4185
4186 4186 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4187 4187 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4188 4188 rval = FC_TRAN_BUSY;
4189 4189 PORTMANAGE_UNLOCK(ha);
4190 4190 break;
4191 4191 }
4192 4192
4193 4193 switch (cmd->pm_cmd_flags) {
4194 4194 case QL_DIAG_EXEFMW:
4195 4195 if (ql_start_firmware(ha) != QL_SUCCESS) {
4196 4196 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4197 4197 rval = FC_FAILURE;
4198 4198 }
4199 4199 break;
4200 4200 case QL_DIAG_CHKCMDQUE:
4201 4201 for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4202 4202 i0++) {
4203 4203 cnt += (pha->outstanding_cmds[i0] != NULL);
4204 4204 }
4205 4205 if (cnt != 0) {
4206 4206 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4207 4207 "FC_FAILURE\n");
4208 4208 rval = FC_FAILURE;
4209 4209 }
4210 4210 break;
4211 4211 case QL_DIAG_FMWCHKSUM:
4212 4212 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4213 4213 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4214 4214 "FC_FAILURE\n");
4215 4215 rval = FC_FAILURE;
4216 4216 }
4217 4217 break;
4218 4218 case QL_DIAG_SLFTST:
4219 4219 if (ql_online_selftest(ha) != QL_SUCCESS) {
4220 4220 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4221 4221 rval = FC_FAILURE;
4222 4222 }
4223 4223 ql_reset_chip(ha);
4224 4224 set_flags |= ISP_ABORT_NEEDED;
4225 4225 break;
4226 4226 case QL_DIAG_REVLVL:
4227 4227 if (cmd->pm_stat_len <
4228 4228 sizeof (ql_adapter_revlvl_t)) {
4229 4229 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4230 4230 "slen=%lxh, rlvllen=%lxh\n",
4231 4231 cmd->pm_stat_len,
4232 4232 sizeof (ql_adapter_revlvl_t));
4233 4233 rval = FC_NOMEM;
4234 4234 } else {
4235 4235 bcopy((void *)&(pha->adapter_stats->revlvl),
4236 4236 cmd->pm_stat_buf,
4237 4237 (size_t)cmd->pm_stat_len);
4238 4238 cmd->pm_stat_len =
4239 4239 sizeof (ql_adapter_revlvl_t);
4240 4240 }
4241 4241 break;
4242 4242 case QL_DIAG_LPBMBX:
4243 4243
4244 4244 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4245 4245 EL(ha, "failed, QL_DIAG_LPBMBX "
4246 4246 "FC_INVALID_REQUEST, pmlen=%lxh, "
4247 4247 "reqd=%lxh\n", cmd->pm_data_len,
4248 4248 sizeof (struct app_mbx_cmd));
4249 4249 rval = FC_INVALID_REQUEST;
4250 4250 break;
4251 4251 }
4252 4252 /*
4253 4253 * Don't do the wrap test on a 2200 when the
4254 4254 * firmware is running.
4255 4255 */
4256 4256 if (!CFG_IST(ha, CFG_CTRL_2200)) {
4257 4257 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4258 4258 mr.mb[1] = mcp->mb[1];
4259 4259 mr.mb[2] = mcp->mb[2];
4260 4260 mr.mb[3] = mcp->mb[3];
4261 4261 mr.mb[4] = mcp->mb[4];
4262 4262 mr.mb[5] = mcp->mb[5];
4263 4263 mr.mb[6] = mcp->mb[6];
4264 4264 mr.mb[7] = mcp->mb[7];
4265 4265
4266 4266 bcopy(&mr.mb[0], &mr.mb[10],
4267 4267 sizeof (uint16_t) * 8);
4268 4268
4269 4269 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4270 4270 EL(ha, "failed, QL_DIAG_LPBMBX "
4271 4271 "FC_FAILURE\n");
4272 4272 rval = FC_FAILURE;
4273 4273 break;
4274 4274 } else {
4275 4275 for (i0 = 1; i0 < 8; i0++) {
4276 4276 if (mr.mb[i0] !=
4277 4277 mr.mb[i0 + 10]) {
4278 4278 EL(ha, "failed, "
4279 4279 "QL_DIAG_LPBMBX "
4280 4280 "FC_FAILURE-2\n");
4281 4281 rval = FC_FAILURE;
4282 4282 break;
4283 4283 }
4284 4284 }
4285 4285 }
4286 4286
4287 4287 if (rval == FC_FAILURE) {
4288 4288 (void) ql_flash_errlog(ha,
4289 4289 FLASH_ERRLOG_ISP_ERR, 0,
4290 4290 RD16_IO_REG(ha, hccr),
4291 4291 RD16_IO_REG(ha, istatus));
4292 4292 set_flags |= ISP_ABORT_NEEDED;
4293 4293 }
4294 4294 }
4295 4295 break;
4296 4296 case QL_DIAG_LPBDTA:
4297 4297 /*
4298 4298 * For loopback data, we receive the
4299 4299 * data back in pm_stat_buf. This provides
4300 4300 * the user an opportunity to compare the
4301 4301 * transmitted and received data.
4302 4302 *
4303 4303 * NB: lb->options are:
4304 4304 * 0 --> Ten bit loopback
4305 4305 * 1 --> One bit loopback
4306 4306 * 2 --> External loopback
4307 4307 */
4308 4308 if (cmd->pm_data_len > 65536) {
4309 4309 rval = FC_TOOMANY;
4310 4310 EL(ha, "failed, QL_DIAG_LPBDTA "
4311 4311 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4312 4312 break;
4313 4313 }
4314 4314 if (ql_get_dma_mem(ha, &buffer_xmt,
4315 4315 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4316 4316 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4317 4317 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4318 4318 rval = FC_NOMEM;
4319 4319 break;
4320 4320 }
4321 4321 if (ql_get_dma_mem(ha, &buffer_rcv,
4322 4322 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4323 4323 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4324 4324 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4325 4325 rval = FC_NOMEM;
4326 4326 break;
4327 4327 }
4328 4328 ddi_rep_put8(buffer_xmt.acc_handle,
4329 4329 (uint8_t *)cmd->pm_data_buf,
4330 4330 (uint8_t *)buffer_xmt.bp,
4331 4331 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4332 4332
4333 4333 /* 22xx's adapter must be in loop mode for test. */
4334 4334 if (CFG_IST(ha, CFG_CTRL_2200)) {
4335 4335 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4336 4336 if (ha->flags & POINT_TO_POINT ||
4337 4337 (ha->task_daemon_flags & LOOP_DOWN &&
4338 4338 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4339 4339 cnt = *bptr;
4340 4340 *bptr = (uint8_t)
4341 4341 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4342 4342 (void) ql_abort_isp(ha);
4343 4343 *bptr = (uint8_t)cnt;
4344 4344 }
4345 4345 }
4346 4346
4347 4347 /* Shutdown IP. */
4348 4348 if (pha->flags & IP_INITIALIZED) {
4349 4349 (void) ql_shutdown_ip(pha);
4350 4350 }
4351 4351
4352 4352 lb = (lbp_t *)cmd->pm_cmd_buf;
4353 4353 lb->transfer_count =
4354 4354 (uint32_t)cmd->pm_data_len;
4355 4355 lb->transfer_segment_count = 0;
4356 4356 lb->receive_segment_count = 0;
4357 4357 lb->transfer_data_address =
4358 4358 buffer_xmt.cookie.dmac_address;
4359 4359 lb->receive_data_address =
4360 4360 buffer_rcv.cookie.dmac_address;
4361 4361
4362 4362 if (ql_loop_back(ha, 0, lb,
4363 4363 buffer_xmt.cookie.dmac_notused,
4364 4364 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4365 4365 bzero((void *)cmd->pm_stat_buf,
4366 4366 cmd->pm_stat_len);
4367 4367 ddi_rep_get8(buffer_rcv.acc_handle,
4368 4368 (uint8_t *)cmd->pm_stat_buf,
4369 4369 (uint8_t *)buffer_rcv.bp,
4370 4370 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4371 4371 rval = FC_SUCCESS;
4372 4372 } else {
4373 4373 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4374 4374 rval = FC_FAILURE;
4375 4375 }
4376 4376
4377 4377 ql_free_phys(ha, &buffer_xmt);
4378 4378 ql_free_phys(ha, &buffer_rcv);
4379 4379
4380 4380 /* Needed to recover the f/w */
4381 4381 set_flags |= ISP_ABORT_NEEDED;
4382 4382
4383 4383 /* Restart IP if it was shutdown. */
4384 4384 if (pha->flags & IP_ENABLED &&
4385 4385 !(pha->flags & IP_INITIALIZED)) {
4386 4386 (void) ql_initialize_ip(pha);
4387 4387 ql_isp_rcvbuf(pha);
4388 4388 }
4389 4389
4390 4390 break;
4391 4391 case QL_DIAG_ECHO: {
4392 4392 /*
4393 4393 * issue an echo command with a user supplied
4394 4394 * data pattern and destination address
4395 4395 */
4396 4396 echo_t echo; /* temp echo struct */
4397 4397
4398 4398 /* Setup echo cmd & adjust for platform */
4399 4399 opcode = QL_ECHO_CMD;
4400 4400 BIG_ENDIAN_32(&opcode);
4401 4401
4402 4402 /*
4403 4403 * due to limitations in the ql
4404 4404 * firmaware the echo data field is
4405 4405 * limited to 220
4406 4406 */
4407 4407 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4408 4408 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4409 4409 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4410 4410 "cmdl1=%lxh, statl2=%lxh\n",
4411 4411 cmd->pm_cmd_len, cmd->pm_stat_len);
4412 4412 rval = FC_TOOMANY;
4413 4413 break;
4414 4414 }
4415 4415
4416 4416 /*
4417 4417 * the input data buffer has the user
4418 4418 * supplied data pattern. The "echoed"
4419 4419 * data will be DMAed into the output
4420 4420 * data buffer. Therefore the length
4421 4421 * of the output buffer must be equal
4422 4422 * to or greater then the input buffer
4423 4423 * length
4424 4424 */
4425 4425 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4426 4426 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4427 4427 " cmdl1=%lxh, statl2=%lxh\n",
4428 4428 cmd->pm_cmd_len, cmd->pm_stat_len);
4429 4429 rval = FC_TOOMANY;
4430 4430 break;
4431 4431 }
4432 4432 /* add four bytes for the opcode */
4433 4433 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4434 4434
4435 4435 /*
4436 4436 * are we 32 or 64 bit addressed???
4437 4437 * We need to get the appropriate
4438 4438 * DMA and set the command options;
4439 4439 * 64 bit (bit 6) or 32 bit
4440 4440 * (no bit 6) addressing.
4441 4441 * while we are at it lets ask for
4442 4442 * real echo (bit 15)
4443 4443 */
4444 4444 echo.options = BIT_15;
4445 4445 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4446 4446 !(CFG_IST(ha, CFG_CTRL_8081))) {
4447 4447 echo.options = (uint16_t)
4448 4448 (echo.options | BIT_6);
4449 4449 }
4450 4450
4451 4451 /*
4452 4452 * Set up the DMA mappings for the
4453 4453 * output and input data buffers.
4454 4454 * First the output buffer
4455 4455 */
4456 4456 if (ql_get_dma_mem(ha, &buffer_xmt,
4457 4457 (uint32_t)(cmd->pm_data_len + 4),
4458 4458 LITTLE_ENDIAN_DMA,
4459 4459 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4460 4460 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4461 4461 rval = FC_NOMEM;
4462 4462 break;
4463 4463 }
4464 4464 echo.transfer_data_address = buffer_xmt.cookie;
4465 4465
4466 4466 /* Next the input buffer */
4467 4467 if (ql_get_dma_mem(ha, &buffer_rcv,
4468 4468 (uint32_t)(cmd->pm_data_len + 4),
4469 4469 LITTLE_ENDIAN_DMA,
4470 4470 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4471 4471 /*
4472 4472 * since we could not allocate
4473 4473 * DMA space for the input
4474 4474 * buffer we need to clean up
4475 4475 * by freeing the DMA space
4476 4476 * we allocated for the output
4477 4477 * buffer
4478 4478 */
4479 4479 ql_free_phys(ha, &buffer_xmt);
4480 4480 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4481 4481 rval = FC_NOMEM;
4482 4482 break;
4483 4483 }
4484 4484 echo.receive_data_address = buffer_rcv.cookie;
4485 4485
4486 4486 /*
4487 4487 * copy the 4 byte ECHO op code to the
4488 4488 * allocated DMA space
4489 4489 */
4490 4490 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4491 4491 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4492 4492
4493 4493 /*
4494 4494 * copy the user supplied data to the
4495 4495 * allocated DMA space
4496 4496 */
4497 4497 ddi_rep_put8(buffer_xmt.acc_handle,
4498 4498 (uint8_t *)cmd->pm_cmd_buf,
4499 4499 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4500 4500 DDI_DEV_AUTOINCR);
4501 4501
4502 4502 /* Shutdown IP. */
4503 4503 if (pha->flags & IP_INITIALIZED) {
4504 4504 (void) ql_shutdown_ip(pha);
4505 4505 }
4506 4506
4507 4507 /* send the echo */
4508 4508 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4509 4509 ddi_rep_put8(buffer_rcv.acc_handle,
4510 4510 (uint8_t *)buffer_rcv.bp + 4,
4511 4511 (uint8_t *)cmd->pm_stat_buf,
4512 4512 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4513 4513 } else {
4514 4514 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4515 4515 rval = FC_FAILURE;
4516 4516 }
4517 4517
4518 4518 /* Restart IP if it was shutdown. */
4519 4519 if (pha->flags & IP_ENABLED &&
4520 4520 !(pha->flags & IP_INITIALIZED)) {
4521 4521 (void) ql_initialize_ip(pha);
4522 4522 ql_isp_rcvbuf(pha);
4523 4523 }
4524 4524 /* free up our DMA buffers */
4525 4525 ql_free_phys(ha, &buffer_xmt);
4526 4526 ql_free_phys(ha, &buffer_rcv);
4527 4527 break;
4528 4528 }
4529 4529 default:
4530 4530 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4531 4531 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4532 4532 rval = FC_INVALID_REQUEST;
4533 4533 break;
4534 4534 }
4535 4535 PORTMANAGE_UNLOCK(ha);
4536 4536 break;
4537 4537 case FC_PORT_LINK_STATE:
4538 4538 /* Check for name equal to null. */
4539 4539 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4540 4540 index++) {
4541 4541 if (cmd->pm_cmd_buf[index] != 0) {
4542 4542 break;
4543 4543 }
4544 4544 }
4545 4545
4546 4546 /* If name not null. */
4547 4547 if (index < 8 && cmd->pm_cmd_len >= 8) {
4548 4548 /* Locate device queue. */
4549 4549 tq = NULL;
4550 4550 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4551 4551 tq == NULL; index++) {
4552 4552 for (link = ha->dev[index].first; link != NULL;
4553 4553 link = link->next) {
4554 4554 tq = link->base_address;
4555 4555
4556 4556 if (bcmp((void *)&tq->port_name[0],
4557 4557 (void *)cmd->pm_cmd_buf, 8) == 0) {
4558 4558 break;
4559 4559 } else {
4560 4560 tq = NULL;
4561 4561 }
4562 4562 }
4563 4563 }
4564 4564
4565 4565 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4566 4566 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4567 4567 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4568 4568 } else {
4569 4569 cnt = FC_PORT_SPEED_MASK(ha->state) |
4570 4570 FC_STATE_OFFLINE;
4571 4571 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4572 4572 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4573 4573 }
4574 4574 } else {
4575 4575 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4576 4576 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4577 4577 }
4578 4578 break;
4579 4579 case FC_PORT_INITIALIZE:
4580 4580 if (cmd->pm_cmd_len >= 8) {
4581 4581 tq = NULL;
4582 4582 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4583 4583 tq == NULL; index++) {
4584 4584 for (link = ha->dev[index].first; link != NULL;
4585 4585 link = link->next) {
4586 4586 tq = link->base_address;
4587 4587
4588 4588 if (bcmp((void *)&tq->port_name[0],
4589 4589 (void *)cmd->pm_cmd_buf, 8) == 0) {
4590 4590 if (!VALID_DEVICE_ID(ha,
4591 4591 tq->loop_id)) {
4592 4592 tq = NULL;
4593 4593 }
4594 4594 break;
4595 4595 } else {
4596 4596 tq = NULL;
4597 4597 }
4598 4598 }
4599 4599 }
4600 4600
4601 4601 if (tq == NULL || ql_target_reset(ha, tq,
4602 4602 ha->loop_reset_delay) != QL_SUCCESS) {
4603 4603 EL(ha, "failed, FC_PORT_INITIALIZE "
4604 4604 "FC_FAILURE\n");
4605 4605 rval = FC_FAILURE;
4606 4606 }
4607 4607 } else {
4608 4608 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4609 4609 "clen=%lxh\n", cmd->pm_cmd_len);
4610 4610
4611 4611 rval = FC_FAILURE;
4612 4612 }
4613 4613 break;
4614 4614 case FC_PORT_RLS:
4615 4615 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4616 4616 EL(ha, "failed, buffer size passed: %lxh, "
4617 4617 "req: %lxh\n", cmd->pm_data_len,
4618 4618 (sizeof (fc_rls_acc_t)));
4619 4619 rval = FC_FAILURE;
4620 4620 } else if (LOOP_NOT_READY(pha)) {
4621 4621 EL(ha, "loop NOT ready\n");
4622 4622 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4623 4623 } else if (ql_get_link_status(ha, ha->loop_id,
4624 4624 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4625 4625 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4626 4626 rval = FC_FAILURE;
4627 4627 #ifdef _BIG_ENDIAN
4628 4628 } else {
4629 4629 fc_rls_acc_t *rls;
4630 4630
4631 4631 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4632 4632 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4633 4633 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4634 4634 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4635 4635 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4636 4636 #endif /* _BIG_ENDIAN */
4637 4637 }
4638 4638 break;
4639 4639 case FC_PORT_GET_NODE_ID:
4640 4640 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4641 4641 cmd->pm_data_buf) != QL_SUCCESS) {
4642 4642 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4643 4643 rval = FC_FAILURE;
4644 4644 }
4645 4645 break;
4646 4646 case FC_PORT_SET_NODE_ID:
4647 4647 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4648 4648 cmd->pm_data_buf) != QL_SUCCESS) {
4649 4649 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4650 4650 rval = FC_FAILURE;
4651 4651 }
4652 4652 break;
4653 4653 case FC_PORT_DOWNLOAD_FCODE:
4654 4654 PORTMANAGE_LOCK(ha);
4655 4655 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4656 4656 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4657 4657 (uint32_t)cmd->pm_data_len);
4658 4658 } else {
4659 4659 if (cmd->pm_data_buf[0] == 4 &&
4660 4660 cmd->pm_data_buf[8] == 0 &&
4661 4661 cmd->pm_data_buf[9] == 0x10 &&
4662 4662 cmd->pm_data_buf[10] == 0 &&
4663 4663 cmd->pm_data_buf[11] == 0) {
4664 4664 rval = ql_24xx_load_flash(ha,
4665 4665 (uint8_t *)cmd->pm_data_buf,
4666 4666 (uint32_t)cmd->pm_data_len,
4667 4667 ha->flash_fw_addr << 2);
4668 4668 } else {
4669 4669 rval = ql_24xx_load_flash(ha,
4670 4670 (uint8_t *)cmd->pm_data_buf,
4671 4671 (uint32_t)cmd->pm_data_len, 0);
4672 4672 }
4673 4673 }
4674 4674
4675 4675 if (rval != QL_SUCCESS) {
4676 4676 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4677 4677 rval = FC_FAILURE;
4678 4678 } else {
4679 4679 rval = FC_SUCCESS;
4680 4680 }
4681 4681 ql_reset_chip(ha);
4682 4682 set_flags |= ISP_ABORT_NEEDED;
4683 4683 PORTMANAGE_UNLOCK(ha);
4684 4684 break;
4685 4685 default:
4686 4686 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4687 4687 rval = FC_BADCMD;
4688 4688 break;
4689 4689 }
4690 4690
4691 4691 /* Wait for suspension to end. */
4692 4692 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4693 4693 timer = 0;
4694 4694
4695 4695 while (timer++ < 3000 &&
4696 4696 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4697 4697 ql_delay(ha, 10000);
4698 4698 }
4699 4699
4700 4700 ql_restart_queues(ha);
4701 4701
4702 4702 if (rval != FC_SUCCESS) {
4703 4703 EL(ha, "failed, rval = %xh\n", rval);
4704 4704 } else {
4705 4705 /*EMPTY*/
4706 4706 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4707 4707 }
4708 4708
4709 4709 return (rval);
4710 4710 }
4711 4711
4712 4712 static opaque_t
4713 4713 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4714 4714 {
4715 4715 port_id_t id;
4716 4716 ql_adapter_state_t *ha;
4717 4717 ql_tgt_t *tq;
4718 4718
4719 4719 id.r.rsvd_1 = 0;
4720 4720 id.b24 = d_id.port_id;
4721 4721
4722 4722 ha = ql_fca_handle_to_state(fca_handle);
4723 4723 if (ha == NULL) {
4724 4724 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4725 4725 (void *)fca_handle);
4726 4726 return (NULL);
4727 4727 }
4728 4728 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4729 4729
4730 4730 tq = ql_d_id_to_queue(ha, id);
4731 4731
4732 4732 if (tq == NULL) {
4733 4733 EL(ha, "failed, tq=NULL\n");
4734 4734 } else {
4735 4735 /*EMPTY*/
4736 4736 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4737 4737 }
4738 4738 return (tq);
4739 4739 }
4740 4740
4741 4741 /* ************************************************************************ */
4742 4742 /* FCA Driver Local Support Functions. */
4743 4743 /* ************************************************************************ */
4744 4744
4745 4745 /*
4746 4746 * ql_cmd_setup
4747 4747 * Verifies proper command.
4748 4748 *
4749 4749 * Input:
4750 4750 * fca_handle = handle setup by ql_bind_port().
4751 4751 * pkt = pointer to fc_packet.
4752 4752 * rval = pointer for return value.
4753 4753 *
4754 4754 * Returns:
4755 4755 * Adapter state pointer, NULL = failure.
4756 4756 *
4757 4757 * Context:
4758 4758 * Kernel context.
4759 4759 */
static ql_adapter_state_t *
ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
{
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;
	ql_tgt_t		*tq;
	port_id_t		d_id;

	/* Start with no residuals; the transport fills them in later. */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* check that the handle is assigned by this FCA */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		*rval = FC_UNBOUND;
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (NULL);
	}
	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * While panicking/dumping, skip all sanity checks so the dump
	 * path can make forward progress.
	 */
	if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
		return (ha);
	}

	/* Adapter must be online to accept new packets. */
	if (!(pha->flags & ONLINE)) {
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_HW_ERROR;
		*rval = FC_TRANSPORT_ERROR;
		EL(ha, "failed, not online hf=%xh\n", pha->flags);
		return (NULL);
	}

	/* Exit on loop down. */
	if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
	    pha->task_daemon_flags & LOOP_DOWN &&
	    pha->loop_down_timer <= pha->loop_down_abort_time) {
		pkt->pkt_state = FC_PKT_PORT_OFFLINE;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		*rval = FC_OFFLINE;
		EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
		return (NULL);
	}

	/* FCP commands: refresh the cached target queue if stale. */
	if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
	    pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
		tq = (ql_tgt_t *)pkt->pkt_fca_device;
		if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			/* Cached pointer missing/invalid; look up by D_ID. */
			d_id.r.rsvd_1 = 0;
			d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
			tq = ql_d_id_to_queue(ha, d_id);

			pkt->pkt_fca_device = (opaque_t)tq;
		}

		/* RSCN or pending re-auth means the target is busy. */
		if (tq != NULL) {
			DEVICE_QUEUE_LOCK(tq);
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION)) {
				*rval = FC_DEVICE_BUSY;
				DEVICE_QUEUE_UNLOCK(tq);
				EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
				    tq->flags, tq->d_id.b24);
				return (NULL);
			}
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	/*
	 * Check DMA pointers.
	 */
	*rval = DDI_SUCCESS;
	if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
		}
	}

	if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
	    pkt->pkt_rsplen != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
		}
	}

	/*
	 * Minimum branch conditional; Change it with care.
	 * NOTE: the bitwise '&' is deliberate -- all three terms are
	 * evaluated unconditionally to keep a single branch; do not
	 * "correct" it to '&&'.
	 */
	if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
	    (pkt->pkt_datalen != 0)) != 0) {
		QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
		*rval = ddi_check_dma_handle(pkt->pkt_data_dma);
		if (*rval == DDI_SUCCESS) {
			*rval = ddi_check_acc_handle(pkt->pkt_data_acc);
		}
	}

	/* Any DMA/access-handle fault invalidates the packet. */
	if (*rval != DDI_SUCCESS) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;

		/* Do command callback. */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
		*rval = FC_BADPACKET;
		EL(ha, "failed, bad DMA pointers\n");
		return (NULL);
	}

	/* The SRB must carry our brand or the packet is not ours. */
	if (sp->magic_number != QL_FCA_BRAND) {
		*rval = FC_BADPACKET;
		EL(ha, "failed, magic number=%xh\n", sp->magic_number);
		return (NULL);
	}
	*rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (ha);
}
4888 4888
4889 4889 /*
4890 4890 * ql_els_plogi
4891 4891 * Issue a extended link service port login request.
4892 4892 *
4893 4893 * Input:
4894 4894 * ha = adapter state pointer.
4895 4895 * pkt = pointer to fc_packet.
4896 4896 *
4897 4897 * Returns:
4898 4898 * FC_SUCCESS - the packet was accepted for transport.
4899 4899 * FC_TRANSPORT_ERROR - a transport error occurred.
4900 4900 *
4901 4901 * Context:
4902 4902 * Kernel context.
4903 4903 */
static int
ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_tgt_t		*tq = NULL;
	port_id_t		d_id;
	la_els_logi_t		acc;
	class_svc_param_t	*class3_param;
	int			ret;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	/* Refuse the login outright when the adapter state is offline. */
	TASK_DAEMON_LOCK(ha);
	if (!(ha->task_daemon_flags & STATE_ONLINE)) {
		TASK_DAEMON_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
		return (FC_OFFLINE);
	}
	TASK_DAEMON_UNLOCK(ha);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	ret = QL_SUCCESS;

	if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
		/*
		 * In point-to-point topology this port sends a PLOGI after
		 * determining it has the N_Port login initiative.
		 */
		ret = ql_p2p_plogi(ha, pkt);
	}
	if (ret == QL_CONSUMED) {
		/* The PLOGI iocb was queued; completion is asynchronous. */
		return (ret);
	}

	switch (ret = ql_login_port(ha, d_id)) {
	case QL_SUCCESS:
		tq = ql_d_id_to_queue(ha, d_id);
		break;

	case QL_LOOP_ID_USED:
		/* First attempt released the stale loop ID; retry once. */
		if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
			tq = ql_d_id_to_queue(ha, d_id);
		}
		break;

	default:
		break;
	}

	if (ret != QL_SUCCESS) {
		/*
		 * Invalidate this entry so as to seek a fresh loop ID
		 * in case firmware reassigns it to something else
		 */
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
			tq->loop_id = PORT_NO_LOOP_ID;
		}
	} else if (tq) {
		/* Refresh the port database; triggers ADISC in firmware. */
		(void) ql_get_port_database(ha, tq, PDF_ADISC);
	}

	if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
	    (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {

		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.common_service.fcph_version = 0x2006;
		acc.common_service.cmn_features = 0x8800;
		acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
		acc.common_service.conc_sequences = 0xff;
		acc.common_service.relative_offset = 0x03;
		acc.common_service.e_d_tov = 0x7d0;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.nport_ww_name.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_ww_name.raw_wwn[0], 8);

		/* Advertise the target's class-3 service parameters. */
		class3_param = (class_svc_param_t *)&acc.class_3;
		class3_param->class_valid_svc_opt = 0x8000;
		class3_param->recipient_ctl = tq->class3_recipient_ctl;
		class3_param->rcv_data_size = tq->class3_rcv_data_size;
		class3_param->conc_sequences = tq->class3_conc_sequences;
		class3_param->open_sequences_per_exch =
		    tq->class3_open_sequences_per_exch;

		if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
			/* Exchange busy: downgrade the ACC to a reject. */
			acc.ls_code.ls_code = LA_ELS_RJT;
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;
			EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
			rval = FC_TRAN_BUSY;
		} else {
			DEVICE_QUEUE_LOCK(tq);
			tq->logout_sent = 0;
			tq->flags &= ~TQF_NEED_AUTHENTICATION;
			if (CFG_IST(ha, CFG_CTRL_242581)) {
				/* Schedule iIDMA (re)negotiation. */
				tq->flags |= TQF_IIDMA_NEEDED;
			}
			DEVICE_QUEUE_UNLOCK(tq);

			if (CFG_IST(ha, CFG_CTRL_242581)) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= TD_IIDMA_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		/* Map the mailbox failure to an FC packet state/reason. */
		switch (ret) {
		case QL_FUNCTION_TIMEOUT:
			pkt->pkt_state = FC_PKT_TIMEOUT;
			pkt->pkt_reason = FC_REASON_HW_ERROR;
			break;

		case QL_MEMORY_ALLOC_FAILED:
			pkt->pkt_state = FC_PKT_LOCAL_BSY;
			pkt->pkt_reason = FC_REASON_NOMEM;
			rval = FC_TRAN_BUSY;
			break;

		case QL_FABRIC_NOT_INITIALIZED:
			pkt->pkt_state = FC_PKT_FABRIC_BSY;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = FC_TRAN_BUSY;
			break;

		default:
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			break;
		}

		EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
		    "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
		    pkt->pkt_reason, ret, rval);
	}

	if (tq != NULL) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
		if (rval == FC_TRAN_BUSY) {
			/* Busy non-broadcast targets must re-authenticate. */
			if (tq->d_id.b24 != BROADCAST_ADDR) {
				tq->flags |= TQF_NEED_AUTHENTICATION;
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5072 5072
5073 5073 /*
5074 5074 * ql_p2p_plogi
5075 5075 * Start an extended link service port login request using
5076 5076 * an ELS Passthru iocb.
5077 5077 *
5078 5078 * Input:
5079 5079 * ha = adapter state pointer.
5080 5080 * pkt = pointer to fc_packet.
5081 5081 *
5082 5082 * Returns:
 *	QL_CONSUMED - the iocb was queued for transport.
5084 5084 *
5085 5085 * Context:
5086 5086 * Kernel context.
5087 5087 */
static int
ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint16_t	id;
	ql_tgt_t	tmp;		/* scratch entry for DB probing */
	ql_tgt_t	*tq = &tmp;
	int		rval;
	port_id_t	d_id;
	ql_srb_t	*sp = (ql_srb_t *)pkt->pkt_fca_private;

	tq->d_id.b.al_pa = 0;
	tq->d_id.b.area = 0;
	tq->d_id.b.domain = 0;

	/*
	 * Verify that the port database hasn't moved beneath our feet by
	 * switching to the appropriate n_port_handle if necessary. This is
	 * less unpleasant than the error recovery if the wrong one is used.
	 */
	for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
		tq->loop_id = id;
		rval = ql_get_port_database(ha, tq, PDF_NONE);
		EL(ha, "rval=%xh\n", rval);
		/* check all the ones not logged in for possible use */
		if (rval == QL_NOT_LOGGED_IN) {
			if (tq->master_state == PD_STATE_PLOGI_PENDING) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			/*
			 * Use a 'port unavailable' entry only
			 * if we used it before.
			 */
			if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
				/* if the port_id matches, reuse it */
				if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
					EL(ha, "n_port_handle =%xh,"
					    "master state=%xh\n",
					    tq->loop_id, tq->master_state);
					break;
				} else if (tq->loop_id ==
				    ha->n_port->n_port_handle) {
					/*
					 * Advance the handle past the one in
					 * use, via a temporary to avoid a
					 * lint error.  NOTE(review): it is
					 * incremented by two; the reason for
					 * skipping two handles is not evident
					 * here -- confirm before changing.
					 */
					uint16_t *hndl;
					uint16_t val;

					hndl = &ha->n_port->n_port_handle;
					val = *hndl;
					val++;
					val++;
					*hndl = val;
				}
				EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
				    "master state=%x\n", rval, id, tq->loop_id,
				    tq->master_state);
			}

		}
		if (rval == QL_SUCCESS) {
			/* A logged-in target entry is the one to use. */
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				ha->n_port->n_port_handle = tq->loop_id;
				EL(ha, "n_port_handle =%xh, master state=%x\n",
				    tq->loop_id, tq->master_state);
				break;
			}
			EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
			    "master state=%x\n", rval, id, tq->loop_id,
			    tq->master_state);
		}
	}
	/* Flush the command payload to the device before issuing. */
	(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);
	/*
	 * NOTE(review): tq may be NULL here if no queue exists for d_id;
	 * ql_timeout_insert is presumed to tolerate that -- confirm.
	 */
	ql_timeout_insert(ha, tq, sp);
	ql_start_iocb(ha, sp);

	return (QL_CONSUMED);
}
5169 5169
5170 5170
5171 5171 /*
5172 5172 * ql_els_flogi
5173 5173 * Issue a extended link service fabric login request.
5174 5174 *
5175 5175 * Input:
5176 5176 * ha = adapter state pointer.
5177 5177 * pkt = pointer to fc_packet.
5178 5178 *
5179 5179 * Returns:
5180 5180 * FC_SUCCESS - the packet was accepted for transport.
5181 5181 * FC_TRANSPORT_ERROR - a transport error occurred.
5182 5182 *
5183 5183 * Context:
5184 5184 * Kernel context.
5185 5185 */
5186 5186 static int
5187 5187 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5188 5188 {
5189 5189 ql_tgt_t *tq = NULL;
5190 5190 port_id_t d_id;
5191 5191 la_els_logi_t acc;
5192 5192 class_svc_param_t *class3_param;
5193 5193 int rval = FC_SUCCESS;
5194 5194 int accept = 0;
5195 5195
5196 5196 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5197 5197 pkt->pkt_cmd_fhdr.d_id);
5198 5198
5199 5199 bzero(&acc, sizeof (acc));
5200 5200 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5201 5201
5202 5202 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5203 5203 /*
5204 5204 * d_id of zero in a FLOGI accept response in a point to point
5205 5205 * topology triggers evaluation of N Port login initiative.
5206 5206 */
5207 5207 pkt->pkt_resp_fhdr.d_id = 0;
5208 5208 /*
5209 5209 * An N_Port already logged in with the firmware
5210 5210 * will have the only database entry.
5211 5211 */
5212 5212 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5213 5213 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5214 5214 }
5215 5215
5216 5216 if (tq != NULL) {
5217 5217 /*
5218 5218 * If the target port has initiative send
5219 5219 * up a PLOGI about the new device.
5220 5220 */
5221 5221 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5222 5222 (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5223 5223 &ha->init_ctrl_blk.cb24.port_name[0] :
5224 5224 &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5225 5225 ha->send_plogi_timer = 3;
5226 5226 } else {
5227 5227 ha->send_plogi_timer = 0;
5228 5228 }
5229 5229 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5230 5230 } else {
5231 5231 /*
5232 5232 * An N_Port not logged in with the firmware will not
5233 5233 * have a database entry. We accept anyway and rely
5234 5234 * on a PLOGI from the upper layers to set the d_id
5235 5235 * and s_id.
5236 5236 */
5237 5237 accept = 1;
5238 5238 }
5239 5239 } else {
5240 5240 tq = ql_d_id_to_queue(ha, d_id);
5241 5241 }
5242 5242 if ((tq != NULL) || (accept != NULL)) {
5243 5243 /* Build ACC. */
5244 5244 pkt->pkt_state = FC_PKT_SUCCESS;
5245 5245 class3_param = (class_svc_param_t *)&acc.class_3;
5246 5246
5247 5247 acc.ls_code.ls_code = LA_ELS_ACC;
5248 5248 acc.common_service.fcph_version = 0x2006;
5249 5249 if (ha->topology & QL_N_PORT) {
5250 5250 /* clear F_Port indicator */
5251 5251 acc.common_service.cmn_features = 0x0800;
5252 5252 } else {
5253 5253 acc.common_service.cmn_features = 0x1b00;
5254 5254 }
5255 5255 CFG_IST(ha, CFG_CTRL_24258081) ?
5256 5256 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5257 5257 ha->init_ctrl_blk.cb24.max_frame_length[0],
5258 5258 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5259 5259 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5260 5260 ha->init_ctrl_blk.cb.max_frame_length[0],
5261 5261 ha->init_ctrl_blk.cb.max_frame_length[1]));
5262 5262 acc.common_service.conc_sequences = 0xff;
5263 5263 acc.common_service.relative_offset = 0x03;
5264 5264 acc.common_service.e_d_tov = 0x7d0;
5265 5265 if (accept) {
5266 5266 /* Use the saved N_Port WWNN and WWPN */
5267 5267 if (ha->n_port != NULL) {
5268 5268 bcopy((void *)&ha->n_port->port_name[0],
5269 5269 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5270 5270 bcopy((void *)&ha->n_port->node_name[0],
5271 5271 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5272 5272 /* mark service options invalid */
5273 5273 class3_param->class_valid_svc_opt = 0x0800;
5274 5274 } else {
5275 5275 EL(ha, "ha->n_port is NULL\n");
5276 5276 /* Build RJT. */
5277 5277 acc.ls_code.ls_code = LA_ELS_RJT;
5278 5278
5279 5279 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5280 5280 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5281 5281 }
5282 5282 } else {
5283 5283 bcopy((void *)&tq->port_name[0],
5284 5284 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5285 5285 bcopy((void *)&tq->node_name[0],
5286 5286 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5287 5287
5288 5288 class3_param = (class_svc_param_t *)&acc.class_3;
5289 5289 class3_param->class_valid_svc_opt = 0x8800;
5290 5290 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5291 5291 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5292 5292 class3_param->conc_sequences =
5293 5293 tq->class3_conc_sequences;
5294 5294 class3_param->open_sequences_per_exch =
5295 5295 tq->class3_open_sequences_per_exch;
5296 5296 }
5297 5297 } else {
5298 5298 /* Build RJT. */
5299 5299 acc.ls_code.ls_code = LA_ELS_RJT;
5300 5300
5301 5301 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5302 5302 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5303 5303 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5304 5304 }
5305 5305
5306 5306 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5307 5307 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5308 5308
5309 5309 if (rval != FC_SUCCESS) {
5310 5310 EL(ha, "failed, rval = %xh\n", rval);
5311 5311 } else {
5312 5312 /*EMPTY*/
5313 5313 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5314 5314 }
5315 5315 return (rval);
5316 5316 }
5317 5317
5318 5318 /*
5319 5319 * ql_els_logo
5320 5320 * Issue a extended link service logout request.
5321 5321 *
5322 5322 * Input:
5323 5323 * ha = adapter state pointer.
5324 5324 * pkt = pointer to fc_packet.
5325 5325 *
5326 5326 * Returns:
5327 5327 * FC_SUCCESS - the packet was accepted for transport.
5328 5328 * FC_TRANSPORT_ERROR - a transport error occurred.
5329 5329 *
5330 5330 * Context:
5331 5331 * Kernel context.
5332 5332 */
static int
ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	la_els_logo_t	acc;
	int		rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    pkt->pkt_cmd_fhdr.d_id);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	tq = ql_d_id_to_queue(ha, d_id);
	if (tq) {
		DEVICE_QUEUE_LOCK(tq);
		/* Never log out the broadcast address. */
		if (tq->d_id.b24 == BROADCAST_ADDR) {
			DEVICE_QUEUE_UNLOCK(tq);
			return (FC_SUCCESS);
		}

		/* Block new I/O until the port re-authenticates. */
		tq->flags |= TQF_NEED_AUTHENTICATION;

		/*
		 * Abort outstanding commands and wait until none remain;
		 * the lock is dropped around the abort and delay.
		 */
		do {
			DEVICE_QUEUE_UNLOCK(tq);
			(void) ql_abort_device(ha, tq, 1);

			/*
			 * Wait for commands to drain in F/W (doesn't
			 * take more than a few milliseconds)
			 */
			ql_delay(ha, 10000);

			DEVICE_QUEUE_LOCK(tq);
		} while (tq->outcnt);

		DEVICE_QUEUE_UNLOCK(tq);
	}

	if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
		/* Build ACC. */
		acc.ls_code.ls_code = LA_ELS_ACC;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5398 5398
5399 5399 /*
5400 5400 * ql_els_prli
5401 5401 * Issue a extended link service process login request.
5402 5402 *
5403 5403 * Input:
5404 5404 * ha = adapter state pointer.
5405 5405 * pkt = pointer to fc_packet.
5406 5406 *
5407 5407 * Returns:
5408 5408 * FC_SUCCESS - the packet was accepted for transport.
5409 5409 * FC_TRANSPORT_ERROR - a transport error occurred.
5410 5410 *
5411 5411 * Context:
5412 5412 * Kernel context.
5413 5413 */
5414 5414 static int
5415 5415 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5416 5416 {
5417 5417 ql_tgt_t *tq;
5418 5418 port_id_t d_id;
5419 5419 la_els_prli_t acc;
5420 5420 prli_svc_param_t *param;
5421 5421 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5422 5422 int rval = FC_SUCCESS;
5423 5423
5424 5424 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5425 5425 pkt->pkt_cmd_fhdr.d_id);
5426 5426
5427 5427 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5428 5428
5429 5429 tq = ql_d_id_to_queue(ha, d_id);
5430 5430 if (tq != NULL) {
5431 5431 (void) ql_get_port_database(ha, tq, PDF_NONE);
5432 5432
5433 5433 if ((ha->topology & QL_N_PORT) &&
5434 5434 (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5435 5435 ql_timeout_insert(ha, tq, sp);
5436 5436 ql_start_iocb(ha, sp);
5437 5437 rval = QL_CONSUMED;
5438 5438 } else {
5439 5439 /* Build ACC. */
5440 5440 bzero(&acc, sizeof (acc));
5441 5441 acc.ls_code = LA_ELS_ACC;
5442 5442 acc.page_length = 0x10;
5443 5443 acc.payload_length = tq->prli_payload_length;
5444 5444
5445 5445 param = (prli_svc_param_t *)&acc.service_params[0];
5446 5446 param->type = 0x08;
5447 5447 param->rsvd = 0x00;
5448 5448 param->process_assoc_flags = tq->prli_svc_param_word_0;
5449 5449 param->process_flags = tq->prli_svc_param_word_3;
5450 5450
5451 5451 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5452 5452 (uint8_t *)pkt->pkt_resp, sizeof (acc),
5453 5453 DDI_DEV_AUTOINCR);
5454 5454
5455 5455 pkt->pkt_state = FC_PKT_SUCCESS;
5456 5456 }
5457 5457 } else {
5458 5458 la_els_rjt_t rjt;
5459 5459
5460 5460 /* Build RJT. */
5461 5461 bzero(&rjt, sizeof (rjt));
5462 5462 rjt.ls_code.ls_code = LA_ELS_RJT;
5463 5463
5464 5464 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5465 5465 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5466 5466
5467 5467 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5468 5468 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5469 5469 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5470 5470 }
5471 5471
5472 5472 if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5473 5473 EL(ha, "failed, rval = %xh\n", rval);
5474 5474 } else {
5475 5475 /*EMPTY*/
5476 5476 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5477 5477 }
5478 5478 return (rval);
5479 5479 }
5480 5480
5481 5481 /*
5482 5482 * ql_els_prlo
5483 5483 * Issue a extended link service process logout request.
5484 5484 *
5485 5485 * Input:
5486 5486 * ha = adapter state pointer.
5487 5487 * pkt = pointer to fc_packet.
5488 5488 *
5489 5489 * Returns:
5490 5490 * FC_SUCCESS - the packet was accepted for transport.
5491 5491 * FC_TRANSPORT_ERROR - a transport error occurred.
5492 5492 *
5493 5493 * Context:
5494 5494 * Kernel context.
5495 5495 */
5496 5496 /* ARGSUSED */
5497 5497 static int
5498 5498 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5499 5499 {
5500 5500 la_els_prli_t acc;
5501 5501 int rval = FC_SUCCESS;
5502 5502
5503 5503 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5504 5504 pkt->pkt_cmd_fhdr.d_id);
5505 5505
5506 5506 /* Build ACC. */
5507 5507 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5508 5508 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5509 5509
5510 5510 acc.ls_code = LA_ELS_ACC;
5511 5511 acc.service_params[2] = 1;
5512 5512
5513 5513 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5514 5514 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5515 5515
5516 5516 pkt->pkt_state = FC_PKT_SUCCESS;
5517 5517
5518 5518 if (rval != FC_SUCCESS) {
5519 5519 EL(ha, "failed, rval = %xh\n", rval);
5520 5520 } else {
5521 5521 /*EMPTY*/
5522 5522 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5523 5523 }
5524 5524 return (rval);
5525 5525 }
5526 5526
5527 5527 /*
5528 5528 * ql_els_adisc
5529 5529 * Issue a extended link service address discovery request.
5530 5530 *
5531 5531 * Input:
5532 5532 * ha = adapter state pointer.
5533 5533 * pkt = pointer to fc_packet.
5534 5534 *
5535 5535 * Returns:
5536 5536 * FC_SUCCESS - the packet was accepted for transport.
5537 5537 * FC_TRANSPORT_ERROR - a transport error occurred.
5538 5538 *
5539 5539 * Context:
5540 5540 * Kernel context.
5541 5541 */
static int
ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	port_id_t		d_id;
	la_els_adisc_t		acc;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	bzero(&acc, sizeof (acc));
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;

	/*
	 * MBC_GET_PORT_DATABASE causes ADISC to go out to
	 * the device from the firmware
	 */
	/* Find the target queue with a matching 24-bit D_ID. */
	index = ql_alpa_to_index[d_id.b.al_pa];
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Queue found but loop ID invalid: recover it from the ID list. */
	if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
		list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
		list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);

		if (list != NULL &&
		    ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
		    QL_SUCCESS) {

			/* mr.mb[1] is the number of entries returned. */
			for (index = 0; index < mr.mb[1]; index++) {
				ql_dev_list(ha, list, index, &d_id, &loop_id);

				if (tq->d_id.b24 == d_id.b24) {
					tq->loop_id = loop_id;
					break;
				}
			}
		} else {
			cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
			    QL_NAME, ha->instance, d_id.b24);
			tq = NULL;
		}
		/* Still no valid loop ID: give up and reject below. */
		if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
			cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
			    QL_NAME, ha->instance, tq->d_id.b24);
			tq = NULL;
		}

		if (list != NULL) {
			kmem_free(list, list_size);
		}
	}

	if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
	    ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {

		/* Build ACC. */

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_NEED_AUTHENTICATION;
		if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
			/*
			 * Restart queued commands on each LUN.
			 * NOTE(review): ql_next() appears to release the
			 * device queue lock, hence the re-lock after each
			 * call -- confirm before restructuring this loop.
			 */
			for (link = tq->lun_queues.first; link != NULL;
			    link = link->next) {
				lq = link->base_address;

				if (lq->cmd.first != NULL) {
					ql_next(ha, lq);
					DEVICE_QUEUE_LOCK(tq);
				}
			}
		}
		DEVICE_QUEUE_UNLOCK(tq);

		acc.ls_code.ls_code = LA_ELS_ACC;
		acc.hard_addr.hard_addr = tq->hard_addr.b24;

		bcopy((void *)&tq->port_name[0],
		    (void *)&acc.port_wwn.raw_wwn[0], 8);
		bcopy((void *)&tq->node_name[0],
		    (void *)&acc.node_wwn.raw_wwn[0], 8);

		acc.nport_id.port_id = tq->d_id.b24;

		pkt->pkt_state = FC_PKT_SUCCESS;
	} else {
		/* Build RJT. */
		acc.ls_code.ls_code = LA_ELS_RJT;

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* Copy the ACC/RJT payload into the caller's response buffer. */
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5659 5659
/*
 * ql_els_linit
 *	Issue an extended link service loop initialize (LINIT) request.
 *	The request is forwarded to the fabric via a Loop Fabric Address
 *	(LFA) mailbox command; on non-fabric topologies an LS_RJT is
 *	built directly into the response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_linit_req_t els;
		lfa_cmd_t lfa;

		/* Copy the ELS request out of the DMA command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Hand the firmware the little-endian DMA address of the
		 * response buffer, split per the adapter's addressing mode.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = LSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = MSD(cp->dmac_laddress);
			LITTLE_ENDIAN_32(&n.size32[1]);
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 4;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x70 = LINIT subcommand (cf. 0x71 LPC, 0x72 LSTS below). */
		lfa.subcommand[1] = 0x70;
		lfa.payload[2] = els.func;
		lfa.payload[4] = els.lip_b3;
		lfa.payload[5] = els.lip_b4;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			/* NOTE(review): pkt_reason is left unset here. */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_linit_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* NOTE(review): rval is never modified, so this branch is dead. */
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5754 5754
/*
 * ql_els_lpc
 *	Issue an extended link service loop control (LPC) request.
 *	The request is forwarded to the fabric via a Loop Fabric Address
 *	(LFA) mailbox command; on non-fabric topologies an LS_RJT is
 *	built directly into the response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		ql_lpc_t els;
		lfa_cmd_t lfa;

		/* Copy the ELS request out of the DMA command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		lfa.resp_buffer_length[0] = 4;

		/*
		 * Response buffer DMA address, little-endian.
		 * NOTE(review): unlike ql_els_linit, the 32-bit path uses
		 * cp->dmac_address and zeroes the upper word — confirm this
		 * is intentional for adapters without 64-bit addressing.
		 */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = (uint64_t)(cp->dmac_laddress);
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 20;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x71 = LPC subcommand (cf. 0x70 LINIT, 0x72 LSTS). */
		lfa.subcommand[1] = 0x71;
		lfa.payload[4] = els.port_control;
		bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			/* NOTE(review): pkt_reason is left unset here. */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		ql_lpc_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* NOTE(review): rval is never modified, so this branch is dead. */
	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5847 5847
/*
 * ql_els_lsts
 *	Issue an extended link service loop status (LSTS) request.
 *	The request is forwarded to the fabric via a Loop Fabric Address
 *	(LFA) mailbox command; on non-fabric topologies an LS_RJT is
 *	built directly into the response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	conv_num_t		n;
	port_id_t		d_id;
	int			rval = FC_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	if (ha->topology & QL_SNS_CONNECTION) {
		fc_lsts_req_t els;
		lfa_cmd_t lfa;

		/* Copy the ELS request out of the DMA command buffer. */
		ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
		    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

		/* Setup LFA mailbox command data. */
		bzero((void *)&lfa, sizeof (lfa_cmd_t));

		/* LSTS returns a larger status payload than LINIT/LPC. */
		lfa.resp_buffer_length[0] = 84;

		/* Response buffer DMA address, little-endian. */
		cp = pkt->pkt_resp_cookie;
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			n.size64 = cp->dmac_laddress;
			LITTLE_ENDIAN_64(&n.size64);
		} else {
			n.size32[0] = cp->dmac_address;
			LITTLE_ENDIAN_32(&n.size32[0]);
			n.size32[1] = 0;
		}

		/* Set buffer address. */
		for (cnt = 0; cnt < 8; cnt++) {
			lfa.resp_buffer_address[cnt] = n.size8[cnt];
		}

		lfa.subcommand_length[0] = 2;
		n.size32[0] = d_id.b24;
		LITTLE_ENDIAN_32(&n.size32[0]);
		lfa.addr[0] = n.size8[0];
		lfa.addr[1] = n.size8[1];
		lfa.addr[2] = n.size8[2];
		/* 0x72 = LSTS subcommand (cf. 0x70 LINIT, 0x71 LPC). */
		lfa.subcommand[1] = 0x72;

		if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
			/* NOTE(review): pkt_reason is left unset here. */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
		} else {
			pkt->pkt_state = FC_PKT_SUCCESS;
		}
	} else {
		fc_lsts_resp_t rjt;

		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.lsts_ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
	}

	/* NOTE(review): rval is never modified, so this branch is dead. */
	if (rval != FC_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5938 5938
5939 5939 /*
5940 5940 * ql_els_scr
5941 5941 * Issue a extended link service state change registration request.
5942 5942 *
5943 5943 * Input:
5944 5944 * ha = adapter state pointer.
5945 5945 * pkt = pointer to fc_packet.
5946 5946 *
5947 5947 * Returns:
5948 5948 * FC_SUCCESS - the packet was accepted for transport.
5949 5949 * FC_TRANSPORT_ERROR - a transport error occurred.
5950 5950 *
5951 5951 * Context:
5952 5952 * Kernel context.
5953 5953 */
5954 5954 static int
5955 5955 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5956 5956 {
5957 5957 fc_scr_resp_t acc;
5958 5958 int rval = FC_SUCCESS;
5959 5959
5960 5960 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5961 5961
5962 5962 bzero(&acc, sizeof (acc));
5963 5963 if (ha->topology & QL_SNS_CONNECTION) {
5964 5964 fc_scr_req_t els;
5965 5965
5966 5966 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5967 5967 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5968 5968
5969 5969 if (ql_send_change_request(ha, els.scr_func) ==
5970 5970 QL_SUCCESS) {
5971 5971 /* Build ACC. */
5972 5972 acc.scr_acc = LA_ELS_ACC;
5973 5973
5974 5974 pkt->pkt_state = FC_PKT_SUCCESS;
5975 5975 } else {
5976 5976 /* Build RJT. */
5977 5977 acc.scr_acc = LA_ELS_RJT;
5978 5978
5979 5979 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5980 5980 pkt->pkt_reason = FC_REASON_HW_ERROR;
5981 5981 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5982 5982 }
5983 5983 } else {
5984 5984 /* Build RJT. */
5985 5985 acc.scr_acc = LA_ELS_RJT;
5986 5986
5987 5987 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5988 5988 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5989 5989 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5990 5990 }
5991 5991
5992 5992 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5993 5993 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5994 5994
5995 5995 if (rval != FC_SUCCESS) {
5996 5996 EL(ha, "failed, rval = %xh\n", rval);
5997 5997 } else {
5998 5998 /*EMPTY*/
5999 5999 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6000 6000 }
6001 6001 return (rval);
6002 6002 }
6003 6003
6004 6004 /*
6005 6005 * ql_els_rscn
6006 6006 * Issue a extended link service register state
6007 6007 * change notification request.
6008 6008 *
6009 6009 * Input:
6010 6010 * ha = adapter state pointer.
6011 6011 * pkt = pointer to fc_packet.
6012 6012 *
6013 6013 * Returns:
6014 6014 * FC_SUCCESS - the packet was accepted for transport.
6015 6015 * FC_TRANSPORT_ERROR - a transport error occurred.
6016 6016 *
6017 6017 * Context:
6018 6018 * Kernel context.
6019 6019 */
6020 6020 static int
6021 6021 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
6022 6022 {
6023 6023 ql_rscn_resp_t acc;
6024 6024 int rval = FC_SUCCESS;
6025 6025
6026 6026 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6027 6027
6028 6028 bzero(&acc, sizeof (acc));
6029 6029 if (ha->topology & QL_SNS_CONNECTION) {
6030 6030 /* Build ACC. */
6031 6031 acc.scr_acc = LA_ELS_ACC;
6032 6032
6033 6033 pkt->pkt_state = FC_PKT_SUCCESS;
6034 6034 } else {
6035 6035 /* Build RJT. */
6036 6036 acc.scr_acc = LA_ELS_RJT;
6037 6037
6038 6038 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6039 6039 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6040 6040 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6041 6041 }
6042 6042
6043 6043 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6044 6044 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6045 6045
6046 6046 if (rval != FC_SUCCESS) {
6047 6047 EL(ha, "failed, rval = %xh\n", rval);
6048 6048 } else {
6049 6049 /*EMPTY*/
6050 6050 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6051 6051 }
6052 6052 return (rval);
6053 6053 }
6054 6054
6055 6055 /*
6056 6056 * ql_els_farp_req
6057 6057 * Issue FC Address Resolution Protocol (FARP)
6058 6058 * extended link service request.
6059 6059 *
6060 6060 * Note: not supported.
6061 6061 *
6062 6062 * Input:
6063 6063 * ha = adapter state pointer.
6064 6064 * pkt = pointer to fc_packet.
6065 6065 *
6066 6066 * Returns:
6067 6067 * FC_SUCCESS - the packet was accepted for transport.
6068 6068 * FC_TRANSPORT_ERROR - a transport error occurred.
6069 6069 *
6070 6070 * Context:
6071 6071 * Kernel context.
6072 6072 */
6073 6073 static int
6074 6074 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6075 6075 {
6076 6076 ql_acc_rjt_t acc;
6077 6077 int rval = FC_SUCCESS;
6078 6078
6079 6079 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6080 6080
6081 6081 bzero(&acc, sizeof (acc));
6082 6082
6083 6083 /* Build ACC. */
6084 6084 acc.ls_code.ls_code = LA_ELS_ACC;
6085 6085
6086 6086 pkt->pkt_state = FC_PKT_SUCCESS;
6087 6087
6088 6088 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6089 6089 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6090 6090
6091 6091 if (rval != FC_SUCCESS) {
6092 6092 EL(ha, "failed, rval = %xh\n", rval);
6093 6093 } else {
6094 6094 /*EMPTY*/
6095 6095 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6096 6096 }
6097 6097 return (rval);
6098 6098 }
6099 6099
6100 6100 /*
6101 6101 * ql_els_farp_reply
6102 6102 * Issue FC Address Resolution Protocol (FARP)
6103 6103 * extended link service reply.
6104 6104 *
6105 6105 * Note: not supported.
6106 6106 *
6107 6107 * Input:
6108 6108 * ha = adapter state pointer.
6109 6109 * pkt = pointer to fc_packet.
6110 6110 *
6111 6111 * Returns:
6112 6112 * FC_SUCCESS - the packet was accepted for transport.
6113 6113 * FC_TRANSPORT_ERROR - a transport error occurred.
6114 6114 *
6115 6115 * Context:
6116 6116 * Kernel context.
6117 6117 */
6118 6118 /* ARGSUSED */
6119 6119 static int
6120 6120 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6121 6121 {
6122 6122 ql_acc_rjt_t acc;
6123 6123 int rval = FC_SUCCESS;
6124 6124
6125 6125 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6126 6126
6127 6127 bzero(&acc, sizeof (acc));
6128 6128
6129 6129 /* Build ACC. */
6130 6130 acc.ls_code.ls_code = LA_ELS_ACC;
6131 6131
6132 6132 pkt->pkt_state = FC_PKT_SUCCESS;
6133 6133
6134 6134 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6135 6135 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6136 6136
6137 6137 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6138 6138
6139 6139 return (rval);
6140 6140 }
6141 6141
/*
 * ql_els_rnid
 *	Handle a Request Node Identification Data (RNID) extended link
 *	service by issuing an RNID ELS to the target and copying the
 *	returned identification data into the response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - RNID data returned in pkt->pkt_resp.
 *	FC_FAILURE - target unknown, not logged in, or ELS failed;
 *		     an LS_RJT is built into the response buffer.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uchar_t			*rnid_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rnid_acc_t	acc;
	la_els_rnid_t		*req;
	size_t			req_len;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the target queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for rnid status block */
	rnid_acc = kmem_zalloc(req_len, KM_SLEEP);

	bzero(&acc, sizeof (acc));

	/*
	 * NOTE(review): pkt_cmd is dereferenced directly here, unlike the
	 * sibling ELS handlers which use ddi_rep_get8() through
	 * pkt_cmd_acc — confirm the command buffer access is DDI-safe.
	 */
	req = (la_els_rnid_t *)pkt->pkt_cmd;
	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
	    (caddr_t)rnid_acc) != QL_SUCCESS)) {

		/* Target unusable or ELS failed: free and build RJT. */
		kmem_free(rnid_acc, req_len);
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	acc.ls_code.ls_code = LA_ELS_ACC;
	/*
	 * NOTE(review): req_len bytes are copied into acc.hdr; presumably
	 * la_els_rnid_acc_t is sized for FCIO_RNID_MAX_DATA_LEN — verify
	 * against the structure definition.
	 */
	bcopy(rnid_acc, &acc.hdr, req_len);
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rnid_acc, req_len);
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6205 6205
/*
 * ql_els_rls
 *	Handle a Read Link error Status (RLS) extended link service by
 *	fetching the target's link error status block from the firmware
 *	and returning it, byte-swapped, in the response buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - link error counters returned in pkt->pkt_resp.
 *	FC_FAILURE - target unknown, not logged in, or the mailbox
 *		     command failed; an LS_RJT is built instead.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	fc_rls_acc_t		*rls_acc;
	port_id_t		d_id;
	ql_link_t		*link;
	ql_tgt_t		*tq;
	uint16_t		index;
	la_els_rls_acc_t	acc;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Find the target queue for the destination port ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			break;
		} else {
			tq = NULL;
		}
	}

	/* Allocate memory for link error status block */
	rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

	bzero(&acc, sizeof (la_els_rls_acc_t));

	if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
	    (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
	    (caddr_t)rls_acc, 0) != QL_SUCCESS)) {

		/* Target unusable or mailbox failed: free and build RJT. */
		kmem_free(rls_acc, sizeof (*rls_acc));
		acc.ls_code.ls_code = LA_ELS_RJT;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
		    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;
		EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");

		return (FC_FAILURE);
	}

	/* Counters arrive little-endian from firmware; swap in place. */
	LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
	LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
	LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);

	/* Build ACC with the swapped link error counters. */
	acc.ls_code.ls_code = LA_ELS_ACC;
	acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
	acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
	acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
	acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
	acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
	ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
	    (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);

	kmem_free(rls_acc, sizeof (*rls_acc));
	pkt->pkt_state = FC_PKT_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
6275 6275
/*
 * ql_busy_plogi
 *	Decide whether a PLOGI to a target can proceed right now.
 *	Waits briefly for the target's outstanding command count to
 *	drain, then (for synchronous PLOGIs only) checks the task
 *	daemon callback queue for work still pending against the same
 *	destination.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to the PLOGI fc_packet.
 *	tq = target queue pointer.
 *
 * Returns:
 *	FC_SUCCESS - safe to proceed with the PLOGI.
 *	FC_TRAN_BUSY - commands still outstanding or callbacks still
 *		       queued; the transport should retry later.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
{
	port_id_t		d_id;
	ql_srb_t		*sp;
	fc_unsol_buf_t		*ubp;
	ql_link_t		*link, *next_link;
	int			rval = FC_SUCCESS;
	int			cnt = 5;	/* drain-wait attempts */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * we need to ensure that q->outcnt == 0, otherwise
	 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
	 * will confuse ulps.
	 */

	DEVICE_QUEUE_LOCK(tq);
	do {
		/*
		 * wait for the cmds to get drained. If they
		 * don't get drained then the transport will
		 * retry PLOGI after few secs.
		 */
		if (tq->outcnt != 0) {
			rval = FC_TRAN_BUSY;
			/* Drop the lock while sleeping between polls. */
			DEVICE_QUEUE_UNLOCK(tq);
			ql_delay(ha, 10000);
			DEVICE_QUEUE_LOCK(tq);
			cnt--;
			if (!cnt) {
				cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
				    " for %xh outcount %xh", QL_NAME,
				    ha->instance, tq->d_id.b24, tq->outcnt);
			}
		} else {
			rval = FC_SUCCESS;
			break;
		}
	} while (cnt > 0);
	DEVICE_QUEUE_UNLOCK(tq);

	/*
	 * return, if busy or if the plogi was asynchronous.
	 */
	if ((rval != FC_SUCCESS) ||
	    (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
	    pkt->pkt_comp)) {
		QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
		    ha->instance);
		return (rval);
	}

	/*
	 * Let us give daemon sufficient time and hopefully
	 * when transport retries PLOGI, it would have flushed
	 * callback queue.
	 */
	TASK_DAEMON_LOCK(ha);
	for (link = ha->callback_queue.first; link != NULL;
	    link = next_link) {
		next_link = link->next;
		sp = link->base_address;
		/* Unsolicited buffers carry the peer ID in the frame. */
		if (sp->flags & SRB_UB_CALLBACK) {
			ubp = ha->ub_array[sp->handle];
			d_id.b24 = ubp->ub_frame.s_id;
		} else {
			d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
		}
		if (tq->d_id.b24 == d_id.b24) {
			cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
			    ha->instance, tq->d_id.b24);
			rval = FC_TRAN_BUSY;
			break;
		}
	}
	TASK_DAEMON_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
6359 6359
/*
 * ql_login_port
 *	Logs in a device if not already logged in.  Handles the name
 *	server (d_id 0xFFFFFC) specially, re-logs in already-known
 *	devices, and for new fabric devices walks the loop-ID space
 *	looking for an unused ID to log in with.
 *
 * Input:
 *	ha = adapter state pointer.
 *	d_id = 24 bit port ID.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Returns:
 *	QL local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
{
	ql_adapter_state_t	*vha;
	ql_link_t		*link;
	uint16_t		index;
	ql_tgt_t		*tq, *tq2;
	uint16_t		loop_id, first_loop_id, last_loop_id;
	int			rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    d_id.b24);

	/* Get head queue index. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* Check for device already has a queue. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			loop_id = tq->loop_id;
			break;
		} else {
			tq = NULL;
		}
	}

	/* Let's stop issuing any IO and unsolicited logo */
	if ((tq != NULL) && (!(ddi_in_panic()))) {
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
		tq->flags &= ~TQF_RSCN_RCVD;
		DEVICE_QUEUE_UNLOCK(tq);
	}
	/* Recover the pre-loss loop ID for a local-loop device. */
	if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
	    !(tq->flags & TQF_FABRIC_DEVICE)) {
		loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
	}

	/* Special case for Nameserver */
	if (d_id.b24 == 0xFFFFFC) {
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
		if (tq == NULL) {
			ADAPTER_STATE_LOCK(ha);
			tq = ql_dev_init(ha, d_id, loop_id);
			ADAPTER_STATE_UNLOCK(ha);
			if (tq == NULL) {
				EL(ha, "failed=%xh, d_id=%xh\n",
				    QL_FUNCTION_FAILED, d_id.b24);
				return (QL_FUNCTION_FAILED);
			}
		}
		if (!(CFG_IST(ha, CFG_CTRL_8021))) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			if (rval == QL_SUCCESS) {
				tq->loop_id = loop_id;
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha, tq, PDF_NONE);
			}
		} else {
			/* 8021-type adapters imply a fabric connection. */
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
		}
	/* Check for device already logged in. */
	} else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
		if (tq->flags & TQF_FABRIC_DEVICE) {
			rval = ql_login_fabric_port(ha, tq, loop_id);
			/* Same port ID already logged in is not an error. */
			if (rval == QL_PORT_ID_USED) {
				rval = QL_SUCCESS;
			}
		} else if (LOCAL_LOOP_ID(loop_id)) {
			rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
			    (tq->flags & TQF_INITIATOR_DEVICE ?
			    LLF_NONE : LLF_PLOGI));
			if (rval == QL_SUCCESS) {
				DEVICE_QUEUE_LOCK(tq);
				tq->loop_id = loop_id;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	} else if (ha->topology & QL_SNS_CONNECTION) {
		/* Locate unused loop ID. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			first_loop_id = 0;
			last_loop_id = LAST_N_PORT_HDL;
		} else if (ha->topology & QL_F_PORT) {
			first_loop_id = 0;
			last_loop_id = SNS_LAST_LOOP_ID;
		} else {
			first_loop_id = SNS_FIRST_LOOP_ID;
			last_loop_id = SNS_LAST_LOOP_ID;
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
		if (tq == NULL) {
			EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
			    d_id.b24);

			ADAPTER_STATE_UNLOCK(ha);

			return (QL_FUNCTION_FAILED);
		}

		/*
		 * Walk the candidate loop IDs (bounded by the range size);
		 * setting index = 1 inside the switch terminates the loop.
		 */
		rval = QL_FUNCTION_FAILED;
		loop_id = ha->pha->free_loop_id++;
		for (index = (uint16_t)(last_loop_id - first_loop_id); index;
		    index--) {
			if (loop_id < first_loop_id ||
			    loop_id > last_loop_id) {
				/* Wrap the allocator back into range. */
				loop_id = first_loop_id;
				ha->pha->free_loop_id = (uint16_t)
				    (loop_id + 1);
			}

			/* Bypass if loop ID used. */
			for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
				tq2 = ql_loop_id_to_queue(vha, loop_id);
				if (tq2 != NULL && tq2 != tq) {
					break;
				}
			}
			if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
			    loop_id == ha->loop_id) {
				loop_id = ha->pha->free_loop_id++;
				continue;
			}

			/* Drop the lock across the mailbox command. */
			ADAPTER_STATE_UNLOCK(ha);
			rval = ql_login_fabric_port(ha, tq, loop_id);

			/*
			 * If PORT_ID_USED is returned
			 * the login_fabric_port() updates
			 * with the correct loop ID
			 */
			switch (rval) {
			case QL_PORT_ID_USED:
				/*
				 * use f/w handle and try to
				 * login again.
				 */
				ADAPTER_STATE_LOCK(ha);
				ha->pha->free_loop_id--;
				ADAPTER_STATE_UNLOCK(ha);
				loop_id = tq->loop_id;
				break;

			case QL_SUCCESS:
				tq->flags |= TQF_FABRIC_DEVICE;
				(void) ql_get_port_database(ha,
				    tq, PDF_NONE);
				index = 1;
				break;

			case QL_LOOP_ID_USED:
				tq->loop_id = PORT_NO_LOOP_ID;
				loop_id = ha->pha->free_loop_id++;
				break;

			case QL_ALL_IDS_IN_USE:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;

			default:
				tq->loop_id = PORT_NO_LOOP_ID;
				index = 1;
				break;
			}

			ADAPTER_STATE_LOCK(ha);
		}

		ADAPTER_STATE_UNLOCK(ha);
	} else {
		/* Unknown device on a non-fabric topology: cannot log in. */
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
	} else {
		EL(ha, "d_id=%xh, loop_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
		    tq->loop_id, tq->port_name[0], tq->port_name[1],
		    tq->port_name[2], tq->port_name[3], tq->port_name[4],
		    tq->port_name[5], tq->port_name[6], tq->port_name[7]);
	}
	return (rval);
}
6569 6569
6570 6570 /*
6571 6571 * ql_login_fabric_port
6572 6572 * Issue login fabric port mailbox command.
6573 6573 *
6574 6574 * Input:
6575 6575 * ha: adapter state pointer.
6576 6576 * tq: target queue pointer.
6577 6577 * loop_id: FC Loop ID.
6578 6578 *
6579 6579 * Returns:
6580 6580 * ql local function return status code.
6581 6581 *
6582 6582 * Context:
6583 6583 * Kernel context.
6584 6584 */
6585 6585 static int
6586 6586 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6587 6587 {
6588 6588 int rval;
6589 6589 int index;
6590 6590 int retry = 0;
6591 6591 port_id_t d_id;
6592 6592 ql_tgt_t *newq;
6593 6593 ql_mbx_data_t mr;
6594 6594
6595 6595 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6596 6596 tq->d_id.b24);
6597 6597
6598 6598 /*
6599 6599 * QL_PARAMETER_ERROR also means the firmware is
6600 6600 * not able to allocate PCB entry due to resource
6601 6601 * issues, or collision.
6602 6602 */
6603 6603 do {
6604 6604 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6605 6605 if ((rval == QL_PARAMETER_ERROR) ||
6606 6606 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6607 6607 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6608 6608 retry++;
6609 6609 drv_usecwait(10 * MILLISEC);
6610 6610 } else {
6611 6611 break;
6612 6612 }
6613 6613 } while (retry < 5);
6614 6614
6615 6615 switch (rval) {
6616 6616 case QL_SUCCESS:
6617 6617 tq->loop_id = loop_id;
6618 6618 break;
6619 6619
6620 6620 case QL_PORT_ID_USED:
6621 6621 /*
6622 6622 * This Loop ID should NOT be in use in drivers
6623 6623 */
6624 6624 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6625 6625
6626 6626 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6627 6627 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6628 6628 "dup loop_id=%xh, d_id=%xh", ha->instance,
6629 6629 newq->loop_id, newq->d_id.b24);
6630 6630 ql_send_logo(ha, newq, NULL);
6631 6631 }
6632 6632
6633 6633 tq->loop_id = mr.mb[1];
6634 6634 break;
6635 6635
6636 6636 case QL_LOOP_ID_USED:
6637 6637 d_id.b.al_pa = LSB(mr.mb[2]);
6638 6638 d_id.b.area = MSB(mr.mb[2]);
6639 6639 d_id.b.domain = LSB(mr.mb[1]);
6640 6640
6641 6641 newq = ql_d_id_to_queue(ha, d_id);
6642 6642 if (newq && (newq->loop_id != loop_id)) {
6643 6643 /*
6644 6644 * This should NEVER ever happen; but this
6645 6645 * code is needed to bail out when the worst
6646 6646 * case happens - or as used to happen before
6647 6647 */
6648 6648 QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6649 6649 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6650 6650 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6651 6651 ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6652 6652 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6653 6653 newq->d_id.b24, loop_id);
6654 6654
6655 6655 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6656 6656 ADAPTER_STATE_LOCK(ha);
6657 6657
6658 6658 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6659 6659 ql_add_link_b(&ha->dev[index], &newq->device);
6660 6660
6661 6661 newq->d_id.b24 = d_id.b24;
6662 6662
6663 6663 index = ql_alpa_to_index[d_id.b.al_pa];
6664 6664 ql_add_link_b(&ha->dev[index], &newq->device);
6665 6665
6666 6666 ADAPTER_STATE_UNLOCK(ha);
6667 6667 }
6668 6668
6669 6669 (void) ql_get_port_database(ha, newq, PDF_NONE);
6670 6670
6671 6671 }
6672 6672
6673 6673 /*
6674 6674 * Invalidate the loop ID for the
6675 6675 * us to obtain a new one.
6676 6676 */
6677 6677 tq->loop_id = PORT_NO_LOOP_ID;
6678 6678 break;
6679 6679
6680 6680 case QL_ALL_IDS_IN_USE:
6681 6681 rval = QL_FUNCTION_FAILED;
6682 6682 EL(ha, "no loop id's available\n");
6683 6683 break;
6684 6684
6685 6685 default:
6686 6686 if (rval == QL_COMMAND_ERROR) {
6687 6687 switch (mr.mb[1]) {
6688 6688 case 2:
6689 6689 case 3:
6690 6690 rval = QL_MEMORY_ALLOC_FAILED;
6691 6691 break;
6692 6692
6693 6693 case 4:
6694 6694 rval = QL_FUNCTION_TIMEOUT;
6695 6695 break;
6696 6696 case 7:
6697 6697 rval = QL_FABRIC_NOT_INITIALIZED;
6698 6698 break;
6699 6699 default:
6700 6700 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6701 6701 break;
6702 6702 }
6703 6703 } else {
6704 6704 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6705 6705 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6706 6706 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6707 6707 }
6708 6708 break;
6709 6709 }
6710 6710
6711 6711 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6712 6712 rval != QL_LOOP_ID_USED) {
6713 6713 EL(ha, "failed=%xh\n", rval);
6714 6714 } else {
6715 6715 /*EMPTY*/
6716 6716 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6717 6717 }
6718 6718 return (rval);
6719 6719 }
6720 6720
6721 6721 /*
6722 6722 * ql_logout_port
6723 6723 * Logs out a device if possible.
6724 6724 *
6725 6725 * Input:
6726 6726 * ha: adapter state pointer.
6727 6727 * d_id: 24 bit port ID.
6728 6728 *
6729 6729 * Returns:
6730 6730 * QL local function return status code.
6731 6731 *
6732 6732 * Context:
6733 6733 * Kernel context.
6734 6734 */
6735 6735 static int
6736 6736 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6737 6737 {
6738 6738 ql_link_t *link;
6739 6739 ql_tgt_t *tq;
6740 6740 uint16_t index;
6741 6741
6742 6742 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6743 6743
6744 6744 /* Get head queue index. */
6745 6745 index = ql_alpa_to_index[d_id.b.al_pa];
6746 6746
6747 6747 /* Get device queue. */
6748 6748 tq = NULL;
6749 6749 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6750 6750 tq = link->base_address;
6751 6751 if (tq->d_id.b24 == d_id.b24) {
6752 6752 break;
6753 6753 } else {
6754 6754 tq = NULL;
6755 6755 }
6756 6756 }
6757 6757
6758 6758 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6759 6759 (void) ql_logout_fabric_port(ha, tq);
6760 6760 tq->loop_id = PORT_NO_LOOP_ID;
6761 6761 }
6762 6762
6763 6763 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6764 6764
6765 6765 return (QL_SUCCESS);
6766 6766 }
6767 6767
/*
 * ql_dev_init
 *	Initialize/allocate device queue.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	d_id:		device destination ID
 *	loop_id:	device loop ID
 *	ADAPTER_STATE_LOCK must be already obtained.
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Kernel context.
 */
ql_tgt_t *
ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
	    ha->instance, d_id.b24, loop_id);

	/* Device lists are hashed by the AL_PA byte of the port ID. */
	index = ql_alpa_to_index[d_id.b.al_pa];

	/* If device queue exists, set proper loop ID. */
	tq = NULL;
	for (link = ha->dev[index].first; link != NULL; link = link->next) {
		tq = link->base_address;
		if (tq->d_id.b24 == d_id.b24) {
			tq->loop_id = loop_id;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			break;
		} else {
			/* Not this one; clear so a miss leaves tq == NULL. */
			tq = NULL;
		}
	}

	/* If device does not have queue. */
	if (tq == NULL) {
		/* KM_SLEEP cannot fail; NULL checks below are defensive. */
		tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
		if (tq != NULL) {
			/*
			 * mutex to protect the device queue,
			 * does not block interrupts.
			 */
			mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
			    (ha->iflags & IFLG_INTR_AIF) ?
			    (void *)(uintptr_t)ha->intr_pri :
			    (void *)(uintptr_t)ha->iblock_cookie);

			tq->d_id.b24 = d_id.b24;
			tq->loop_id = loop_id;
			tq->device.base_address = tq;
			tq->iidma_rate = IIDMA_RATE_INIT;

			/* Reset port down retry count. */
			tq->port_down_retry_count = ha->port_down_retry_count;
			tq->qfull_retry_count = ha->qfull_retry_count;

			/* Add device to device queue. */
			ql_add_link_b(&ha->dev[index], &tq->device);
		}
	}

	if (tq == NULL) {
		EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (tq);
}
6848 6848
/*
 * ql_dev_free
 *	Remove queue from device list and frees resources used by queue.
 *	A no-op if any LUN still has queued commands or the target has
 *	outstanding commands.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	ADAPTER_STATE_LOCK must be already obtained.
 *
 * Context:
 *	Kernel context.
 */
void
ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_lun_t	*lq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Bail out early if any LUN queue still holds pending commands. */
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->cmd.first != NULL) {
			return;
		}
	}

	/* Only tear down when no commands are outstanding on the target. */
	if (tq->outcnt == 0) {
		/* Get head queue index. */
		index = ql_alpa_to_index[tq->d_id.b.al_pa];
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			if (link->base_address == tq) {
				ql_remove_link(&ha->dev[index], link);

				/*
				 * Free every LUN queue owned by the target;
				 * 'link' is reused here, the break below
				 * exits the outer loop afterwards.
				 */
				link = tq->lun_queues.first;
				while (link != NULL) {
					lq = link->base_address;
					link = link->next;

					ql_remove_link(&tq->lun_queues,
					    &lq->link);
					kmem_free(lq, sizeof (ql_lun_t));
				}

				mutex_destroy(&tq->mutex);
				kmem_free(tq, sizeof (ql_tgt_t));
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
6904 6904
/*
 * ql_lun_queue
 *	Allocate LUN queue if does not exists.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue.
 *	lun:	LUN number.
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Kernel context.
 */
static ql_lun_t *
ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
{
	ql_lun_t	*lq;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Fast path: most requests hit the most recently used LUN queue. */
	if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
		QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
		return (tq->last_lun_queue);
	}

	if (lun >= MAX_LUNS) {
		EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
		return (NULL);
	}
	/* Search the target's existing LUN queues for a match. */
	lq = NULL;
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;
		if (lq->lun_no == lun) {
			QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
			tq->last_lun_queue = lq;
			return (lq);
		}
	}

	/* Queue does not exist; allocate one (KM_SLEEP cannot fail). */
	lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);

	/* Initialize LUN queue. */
	if (lq != NULL) {
		lq->link.base_address = lq;

		lq->lun_no = lun;
		lq->target_queue = tq;

		DEVICE_QUEUE_LOCK(tq);
		ql_add_link_b(&tq->lun_queues, &lq->link);
		DEVICE_QUEUE_UNLOCK(tq);
		tq->last_lun_queue = lq;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (lq);
}
6969 6969
/*
 * ql_fcp_scsi_cmd
 *	Process fibre channel (FCP) SCSI protocol commands.
 *
 * Input:
 *	ha = adapter state pointer.
 *	pkt = pointer to fc_packet.
 *	sp = srb pointer.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	uint64_t	*ptr;
	uint16_t	lun;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Use the cached target queue, else look it up by destination ID. */
	tq = (ql_tgt_t *)pkt->pkt_fca_device;
	if (tq == NULL) {
		d_id.r.rsvd_1 = 0;
		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
	}

	/* LUN is carried byte-swapped in the FCP entity address. */
	sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
	lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
	    hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));

	if (tq != NULL &&
	    (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {

		/*
		 * zero out FCP response; 24 Bytes
		 */
		ptr = (uint64_t *)pkt->pkt_resp;
		*ptr++ = 0; *ptr++ = 0; *ptr++ = 0;

		/* Handle task management function. */
		if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
		    sp->fcp->fcp_cntl.cntl_clr_aca |
		    sp->fcp->fcp_cntl.cntl_reset_tgt |
		    sp->fcp->fcp_cntl.cntl_reset_lun |
		    sp->fcp->fcp_cntl.cntl_clr_tsk |
		    sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
			ql_task_mgmt(ha, tq, pkt, sp);
		} else {
			/* Ordinary I/O: account for statistics. */
			ha->pha->xioctl->IosRequested++;
			ha->pha->xioctl->BytesRequested += (uint32_t)
			    sp->fcp->fcp_data_len;

			/*
			 * Setup for commands with data transfer
			 */
			sp->iocb = ha->fcp_cmd;
			sp->req_cnt = 1;
			if (sp->fcp->fcp_data_len != 0) {
				/*
				 * FCP data is bound to pkt_data_dma
				 */
				if (sp->fcp->fcp_cntl.cntl_write_data) {
					(void) ddi_dma_sync(pkt->pkt_data_dma,
					    0, 0, DDI_DMA_SYNC_FORDEV);
				}

				/* Setup IOCB count. */
				/*
				 * Cookies beyond what the command IOCB can
				 * carry spill into continuation IOCBs; one
				 * extra IOCB when the division has a
				 * remainder.
				 */
				if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
				    (!CFG_IST(ha, CFG_CTRL_8021) ||
				    sp->sg_dma.dma_handle == NULL)) {
					uint32_t	cnt;

					cnt = pkt->pkt_data_cookie_cnt -
					    ha->cmd_segs;
					sp->req_cnt = (uint16_t)
					    (cnt / ha->cmd_cont_segs);
					if (cnt % ha->cmd_cont_segs) {
						sp->req_cnt = (uint16_t)
						    (sp->req_cnt + 2);
					} else {
						sp->req_cnt++;
					}
				}
			}
			QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

			return (ql_start_cmd(ha, tq, pkt, sp));
		}
	} else {
		/* No target/LUN queue: reject and complete the packet. */
		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_NO_CONNECTION;

		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
			ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7077 7077
7078 7078 /*
7079 7079 * ql_task_mgmt
7080 7080 * Task management function processor.
7081 7081 *
7082 7082 * Input:
7083 7083 * ha: adapter state pointer.
7084 7084 * tq: target queue pointer.
7085 7085 * pkt: pointer to fc_packet.
7086 7086 * sp: SRB pointer.
7087 7087 *
7088 7088 * Context:
7089 7089 * Kernel context.
7090 7090 */
7091 7091 static void
7092 7092 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7093 7093 ql_srb_t *sp)
7094 7094 {
7095 7095 fcp_rsp_t *fcpr;
7096 7096 struct fcp_rsp_info *rsp;
7097 7097 uint16_t lun;
7098 7098
7099 7099 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7100 7100
7101 7101 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7102 7102 rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7103 7103
7104 7104 bzero(fcpr, pkt->pkt_rsplen);
7105 7105
7106 7106 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7107 7107 fcpr->fcp_response_len = 8;
7108 7108 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7109 7109 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7110 7110
7111 7111 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7112 7112 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7113 7113 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7114 7114 }
7115 7115 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7116 7116 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7117 7117 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7118 7118 }
7119 7119 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7120 7120 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7121 7121 QL_SUCCESS) {
7122 7122 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7123 7123 }
7124 7124 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7125 7125 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7126 7126 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7127 7127 }
7128 7128 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7129 7129 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7130 7130 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7131 7131 }
7132 7132 } else {
7133 7133 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7134 7134 }
7135 7135
7136 7136 pkt->pkt_state = FC_PKT_SUCCESS;
7137 7137
7138 7138 /* Do command callback. */
7139 7139 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7140 7140 ql_awaken_task_daemon(ha, sp, 0, 0);
7141 7141 }
7142 7142
7143 7143 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7144 7144 }
7145 7145
7146 7146 /*
7147 7147 * ql_fcp_ip_cmd
7148 7148 * Process fibre channel (FCP) Internet (IP) protocols commands.
7149 7149 *
7150 7150 * Input:
7151 7151 * ha: adapter state pointer.
7152 7152 * pkt: pointer to fc_packet.
7153 7153 * sp: SRB pointer.
7154 7154 *
7155 7155 * Returns:
7156 7156 * FC_SUCCESS - the packet was accepted for transport.
7157 7157 * FC_TRANSPORT_ERROR - a transport error occurred.
7158 7158 *
7159 7159 * Context:
7160 7160 * Kernel context.
7161 7161 */
7162 7162 static int
7163 7163 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7164 7164 {
7165 7165 port_id_t d_id;
7166 7166 ql_tgt_t *tq;
7167 7167
7168 7168 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7169 7169
7170 7170 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7171 7171 if (tq == NULL) {
7172 7172 d_id.r.rsvd_1 = 0;
7173 7173 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7174 7174 tq = ql_d_id_to_queue(ha, d_id);
7175 7175 }
7176 7176
7177 7177 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7178 7178 /*
7179 7179 * IP data is bound to pkt_cmd_dma
7180 7180 */
7181 7181 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7182 7182 0, 0, DDI_DMA_SYNC_FORDEV);
7183 7183
7184 7184 /* Setup IOCB count. */
7185 7185 sp->iocb = ha->ip_cmd;
7186 7186 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7187 7187 uint32_t cnt;
7188 7188
7189 7189 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7190 7190 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7191 7191 if (cnt % ha->cmd_cont_segs) {
7192 7192 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7193 7193 } else {
7194 7194 sp->req_cnt++;
7195 7195 }
7196 7196 } else {
7197 7197 sp->req_cnt = 1;
7198 7198 }
7199 7199 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7200 7200
7201 7201 return (ql_start_cmd(ha, tq, pkt, sp));
7202 7202 } else {
7203 7203 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7204 7204 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7205 7205
7206 7206 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7207 7207 ql_awaken_task_daemon(ha, sp, 0, 0);
7208 7208 }
7209 7209
7210 7210 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7211 7211
7212 7212 return (FC_SUCCESS);
7213 7213 }
7214 7214
/*
 * ql_fc_services
 *	Process fibre channel services (name server).
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pkt:	pointer to fc_packet.
 *
 * Returns:
 *	FC_SUCCESS - the packet was accepted for transport.
 *	FC_TRANSPORT_ERROR - a transport error occurred.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
{
	uint32_t	cnt;
	fc_ct_header_t	hdr;
	la_els_rjt_t	rjt;
	port_id_t	d_id;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy the CT header out of the command DMA buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
	    (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);

	bzero(&rjt, sizeof (rjt));

	/* Do some sanity checks */
	/* ct_aiusize is in 4-byte words; total must fit the resp buffer. */
	cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
	    sizeof (fc_ct_header_t));
	if (cnt > (uint32_t)pkt->pkt_rsplen) {
		EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
		    pkt->pkt_rsplen);
		return (FC_ELS_MALFORMED);
	}

	switch (hdr.ct_fcstype) {
	case FCSTYPE_DIRECTORY:
	case FCSTYPE_MGMTSERVICE:
		/* An FCA must make sure that the header is in big endian */
		ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);

		d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
		tq = ql_d_id_to_queue(ha, d_id);
		sp = (ql_srb_t *)pkt->pkt_fca_private;
		if (tq == NULL ||
		    (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_NO_CONNECTION;
			rval = QL_SUCCESS;
			break;
		}

		/*
		 * Services data is bound to pkt_cmd_dma
		 */
		(void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		sp->flags |= SRB_MS_PKT;
		sp->retry_count = 32;

		/* Setup IOCB count. */
		/* Response cookies past the MS IOCB go in continuations. */
		sp->iocb = ha->ms_cmd;
		if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
			cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
			sp->req_cnt =
			    (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
			if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
				sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
			} else {
				sp->req_cnt++;
			}
		} else {
			sp->req_cnt = 1;
		}
		rval = ql_start_cmd(ha, tq, pkt, sp);

		QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
		    ha->instance, rval);

		return (rval);

	default:
		EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
		rval = QL_FUNCTION_PARAMETER_ERROR;
		break;
	}

	if (rval != QL_SUCCESS) {
		/* Build RJT. */
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
	}

	/* Do command callback. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
		    0, 0);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (FC_SUCCESS);
}
7333 7333
/*
 * ql_cthdr_endian
 *	Change endianess of ct passthrough header and payload.
 *
 * Input:
 *	acc_handle:	DMA buffer access handle.
 *	ct_hdr:		Pointer to header.
 *	restore:	Restore first flag.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
    boolean_t restore)
{
	uint8_t		i, *bp;
	fc_ct_header_t	hdr;
	uint32_t	*hdrp = (uint32_t *)&hdr;

	/* Pull the header out of the DMA buffer into a local copy. */
	ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);

	/*
	 * When restoring, byte-swap the local header copy first so the
	 * command/type fields below are examined in native order.
	 */
	if (restore) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Swap command-specific payload fields in place (past the header). */
	if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
		bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);

		switch (hdr.ct_cmdrsp) {
		case NS_GA_NXT:
		case NS_GPN_ID:
		case NS_GNN_ID:
		case NS_GCS_ID:
		case NS_GFT_ID:
		case NS_GSPN_ID:
		case NS_GPT_ID:
		case NS_GID_FT:
		case NS_GID_PT:
		case NS_RPN_ID:
		case NS_RNN_ID:
		case NS_RSPN_ID:
		case NS_DA_ID:
			/* Single 32-bit port ID at payload start. */
			BIG_ENDIAN_32(bp);
			break;
		case NS_RFT_ID:
		case NS_RCS_ID:
		case NS_RPT_ID:
			/* Port ID followed by a second 32-bit field. */
			BIG_ENDIAN_32(bp);
			bp += 4;
			BIG_ENDIAN_32(bp);
			break;
		case NS_GNN_IP:
		case NS_GIPA_IP:
			/* 16-byte IP address. */
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIP_NN:
			/* 8-byte node name, then 16-byte IP address. */
			bp += 8;
			BIG_ENDIAN(bp, 16);
			break;
		case NS_RIPA_NN:
			/* 8-byte node name, then 64-bit IP association. */
			bp += 8;
			BIG_ENDIAN_64(bp);
			break;
		default:
			break;
		}
	}

	/* When not restoring, swap the header copy after payload handling. */
	if (restore == B_FALSE) {
		for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
			*hdrp = BE_32(*hdrp);
			hdrp++;
		}
	}

	/* Write the (now swapped) header back into the DMA buffer. */
	ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
	    (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
}
7417 7417
/*
 * ql_start_cmd
 *	Finishes starting fibre channel protocol (FCP) command.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	pkt:	pointer to fc_packet.
 *	sp:	SRB pointer.
 *
 * Returns:
 *	FC_SUCCESS - command accepted (or polled to completion).
 *	FC_DEVICE_BUSY - target needs (re)authentication; retry later.
 *	FC_TRANSPORT_ERROR - polled command did not complete successfully.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
    ql_srb_t *sp)
{
	int		rval = FC_SUCCESS;
	time_t		poll_wait = 0;
	ql_lun_t	*lq = sp->lun_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp->handle = 0;

	/* Set poll for finish. */
	if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
		sp->flags |= SRB_POLL;
		if (pkt->pkt_timeout == 0) {
			pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
		}
	}

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/*
	 * If we need authentication, report device busy to
	 * upper layers to retry later
	 */
	if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
		    tq->d_id.b24);
		return (FC_DEVICE_BUSY);
	}

	/* Insert command onto watchdog queue. */
	if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
		ql_timeout_insert(ha, tq, sp);
	} else {
		/*
		 * Run dump requests in polled mode as kernel threads
		 * and interrupts may have been disabled.
		 */
		sp->flags |= SRB_POLL;
		sp->init_wdg_q_time = 0;
		sp->isp_timeout = 0;
	}

	/* If a polling command setup wait time. */
	if (sp->flags & SRB_POLL) {
		if (sp->flags & SRB_WATCHDOG_ENABLED) {
			/* Allow the watchdog to expire first (+2 ticks). */
			poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
		} else {
			poll_wait = pkt->pkt_timeout;
		}
	}

	if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
	    (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
		/* Set ending status. */
		sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

		/* Call done routine to handle completions. */
		sp->cmd.next = NULL;
		DEVICE_QUEUE_UNLOCK(tq);
		ql_done(&sp->cmd);
	} else {
		if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
			int do_lip = 0;

			DEVICE_QUEUE_UNLOCK(tq);

			/* Reset the ISP once per panic, on first command. */
			ADAPTER_STATE_LOCK(ha);
			if ((do_lip = ha->pha->lip_on_panic) == 0) {
				ha->pha->lip_on_panic++;
			}
			ADAPTER_STATE_UNLOCK(ha);

			if (!do_lip) {

				/*
				 * That Qlogic F/W performs PLOGI, PRLI, etc
				 * is helpful here. If a PLOGI fails for some
				 * reason, you would get CS_PORT_LOGGED_OUT
				 * or some such error; and we should get a
				 * careful polled mode login kicked off inside
				 * of this driver itself. You don't have FC
				 * transport's services as all threads are
				 * suspended, interrupts disabled, and so
				 * on. Right now we do re-login if the packet
				 * state isn't FC_PKT_SUCCESS.
				 */
				(void) ql_abort_isp(ha);
			}

			/* Bypass the device queue; issue directly. */
			ql_start_iocb(ha, sp);
		} else {
			/* Add the command to the device queue */
			if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
				ql_add_link_t(&lq->cmd, &sp->cmd);
			} else {
				ql_add_link_b(&lq->cmd, &sp->cmd);
			}

			sp->flags |= SRB_IN_DEVICE_QUEUE;

			/* Check whether next message can be processed */
			ql_next(ha, lq);
		}
	}

	/* If polling, wait for finish. */
	if (poll_wait) {
		if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
			int res;

			/* Timed out: abort; if abort fails, dequeue it. */
			res = ql_abort((opaque_t)ha, pkt, 0);
			if (res != FC_SUCCESS && res != FC_ABORTED) {
				DEVICE_QUEUE_LOCK(tq);
				ql_remove_link(&lq->cmd, &sp->cmd);
				sp->flags &= ~SRB_IN_DEVICE_QUEUE;
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		if (pkt->pkt_state != FC_PKT_SUCCESS) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		}

		if (ddi_in_panic()) {
			if (pkt->pkt_state != FC_PKT_SUCCESS) {
				port_id_t d_id;

				/*
				 * successful LOGIN implies by design
				 * that PRLI also succeeded for disks
				 * Note also that there is no special
				 * mailbox command to send PRLI.
				 */
				d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
				(void) ql_login_port(ha, d_id);
			}
		}

		/*
		 * This should only happen during CPR dumping
		 */
		if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
		    pkt->pkt_comp) {
			sp->flags &= ~SRB_POLL;
			(*pkt->pkt_comp)(pkt);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7589 7589
/*
 * ql_poll_cmd
 *	Polls commands for completion.
 *
 * Input:
 *	vha = adapter state pointer (physical adapter is derived from it).
 *	sp = SRB command pointer.
 *	poll_wait = poll wait time in seconds.
 *
 * Returns:
 *	QL local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
{
	int			rval = QL_SUCCESS;
	/*
	 * NOTE(review): poll_wait * 100 decremented by 10 per 10ms loop
	 * budgets poll_wait * 100 ms, not poll_wait seconds as the name
	 * suggests — confirm intent before changing.
	 */
	time_t			msecs_left = poll_wait * 100;	/* 10ms inc */
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* ql_done() clears SRB_POLL when the command completes. */
	while (sp->flags & SRB_POLL) {

		/*
		 * Service the adapter manually when interrupts are off,
		 * the adapter has been idle, or we are panicking.
		 */
		if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
		    ha->idle_timer >= 15 || ddi_in_panic()) {

			/* If waiting for restart, do it now. */
			if (ha->port_retry_timer != 0) {
				ADAPTER_STATE_LOCK(ha);
				ha->port_retry_timer = 0;
				ADAPTER_STATE_UNLOCK(ha);

				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= PORT_RETRY_NEEDED;
				TASK_DAEMON_UNLOCK(ha);
			}

			/* Poll the hardware and claim pending interrupts. */
			if (INTERRUPT_PENDING(ha)) {
				(void) ql_isr((caddr_t)ha);
				INTR_LOCK(ha);
				ha->intr_claimed = TRUE;
				INTR_UNLOCK(ha);
			}

			/*
			 * Call task thread function in case the
			 * daemon is not running.
			 */
			TASK_DAEMON_LOCK(ha);

			if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
			    QL_TASK_PENDING(ha)) {
				ha->task_daemon_flags |= TASK_THREAD_CALLED;
				ql_task_thread(ha);
				ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
			}

			TASK_DAEMON_UNLOCK(ha);
		}

		if (msecs_left < 10) {
			rval = QL_FUNCTION_TIMEOUT;
			break;
		}

		/*
		 * Polling interval is 10 milli seconds; Increasing
		 * the polling interval to seconds since disk IO
		 * timeout values are ~60 seconds is tempting enough,
		 * but CPR dump time increases, and so will the crash
		 * dump time; Don't toy with the settings without due
		 * consideration for all the scenarios that will be
		 * impacted.
		 */
		ql_delay(ha, 10000);
		msecs_left -= 10;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
7675 7675
7676 7676 /*
7677 7677 * ql_next
7678 7678 * Retrieve and process next job in the device queue.
7679 7679 *
7680 7680 * Input:
7681 7681 * ha: adapter state pointer.
7682 7682 * lq: LUN queue pointer.
7683 7683 * DEVICE_QUEUE_LOCK must be already obtained.
7684 7684 *
7685 7685 * Output:
7686 7686 * Releases DEVICE_QUEUE_LOCK upon exit.
7687 7687 *
7688 7688 * Context:
7689 7689 * Interrupt or Kernel context, no mailbox commands allowed.
7690 7690 */
7691 7691 void
7692 7692 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7693 7693 {
7694 7694 ql_srb_t *sp;
7695 7695 ql_link_t *link;
7696 7696 ql_tgt_t *tq = lq->target_queue;
7697 7697 ql_adapter_state_t *ha = vha->pha;
7698 7698
7699 7699 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7700 7700
7701 7701 if (ddi_in_panic()) {
7702 7702 DEVICE_QUEUE_UNLOCK(tq);
7703 7703 QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7704 7704 ha->instance);
7705 7705 return;
7706 7706 }
7707 7707
7708 7708 while ((link = lq->cmd.first) != NULL) {
7709 7709 sp = link->base_address;
7710 7710
7711 7711 /* Exit if can not start commands. */
7712 7712 if (DRIVER_SUSPENDED(ha) ||
7713 7713 (ha->flags & ONLINE) == 0 ||
7714 7714 !VALID_DEVICE_ID(ha, tq->loop_id) ||
7715 7715 sp->flags & SRB_ABORT ||
7716 7716 tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7717 7717 TQF_QUEUE_SUSPENDED)) {
7718 7718 EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7719 7719 "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7720 7720 ha->task_daemon_flags, tq->flags, sp->flags,
7721 7721 ha->flags, tq->loop_id);
7722 7722 break;
7723 7723 }
7724 7724
7725 7725 /*
7726 7726 * Find out the LUN number for untagged command use.
7727 7727 * If there is an untagged command pending for the LUN,
7728 7728 * we would not submit another untagged command
7729 7729 * or if reached LUN execution throttle.
7730 7730 */
7731 7731 if (sp->flags & SRB_FCP_CMD_PKT) {
7732 7732 if (lq->flags & LQF_UNTAGGED_PENDING ||
7733 7733 lq->lun_outcnt >= ha->execution_throttle) {
7734 7734 QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7735 7735 "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7736 7736 tq->d_id.b24, lq->flags, lq->lun_outcnt);
7737 7737 break;
7738 7738 }
7739 7739 if (sp->fcp->fcp_cntl.cntl_qtype ==
7740 7740 FCP_QTYPE_UNTAGGED) {
7741 7741 /*
7742 7742 * Set the untagged-flag for the LUN
7743 7743 * so that no more untagged commands
7744 7744 * can be submitted for this LUN.
7745 7745 */
7746 7746 lq->flags |= LQF_UNTAGGED_PENDING;
7747 7747 }
7748 7748
7749 7749 /* Count command as sent. */
7750 7750 lq->lun_outcnt++;
7751 7751 }
7752 7752
7753 7753 /* Remove srb from device queue. */
7754 7754 ql_remove_link(&lq->cmd, &sp->cmd);
7755 7755 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7756 7756
7757 7757 tq->outcnt++;
7758 7758
7759 7759 ql_start_iocb(vha, sp);
7760 7760 }
7761 7761
7762 7762 /* Release device queue lock. */
7763 7763 DEVICE_QUEUE_UNLOCK(tq);
7764 7764
7765 7765 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7766 7766 }
7767 7767
7768 7768 /*
7769 7769 * ql_done
7770 7770 * Process completed commands.
7771 7771 *
7772 7772 * Input:
7773 7773 * link: first command link in chain.
7774 7774 *
7775 7775 * Context:
7776 7776 * Interrupt or Kernel context, no mailbox commands allowed.
7777 7777 */
7778 7778 void
7779 7779 ql_done(ql_link_t *link)
7780 7780 {
7781 7781 ql_adapter_state_t *ha;
7782 7782 ql_link_t *next_link;
7783 7783 ql_srb_t *sp;
7784 7784 ql_tgt_t *tq;
7785 7785 ql_lun_t *lq;
7786 7786
7787 7787 QL_PRINT_3(CE_CONT, "started\n");
7788 7788
7789 7789 for (; link != NULL; link = next_link) {
7790 7790 next_link = link->next;
7791 7791 sp = link->base_address;
7792 7792 ha = sp->ha;
7793 7793
7794 7794 if (sp->flags & SRB_UB_CALLBACK) {
7795 7795 QL_UB_LOCK(ha);
7796 7796 if (sp->flags & SRB_UB_IN_ISP) {
7797 7797 if (ha->ub_outcnt != 0) {
7798 7798 ha->ub_outcnt--;
7799 7799 }
7800 7800 QL_UB_UNLOCK(ha);
7801 7801 ql_isp_rcvbuf(ha);
7802 7802 QL_UB_LOCK(ha);
7803 7803 }
7804 7804 QL_UB_UNLOCK(ha);
7805 7805 ql_awaken_task_daemon(ha, sp, 0, 0);
7806 7806 } else {
7807 7807 /* Free outstanding command slot. */
7808 7808 if (sp->handle != 0) {
7809 7809 ha->outstanding_cmds[
7810 7810 sp->handle & OSC_INDEX_MASK] = NULL;
7811 7811 sp->handle = 0;
7812 7812 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7813 7813 }
7814 7814
7815 7815 /* Acquire device queue lock. */
7816 7816 lq = sp->lun_queue;
7817 7817 tq = lq->target_queue;
7818 7818 DEVICE_QUEUE_LOCK(tq);
7819 7819
7820 7820 /* Decrement outstanding commands on device. */
7821 7821 if (tq->outcnt != 0) {
7822 7822 tq->outcnt--;
7823 7823 }
7824 7824
7825 7825 if (sp->flags & SRB_FCP_CMD_PKT) {
7826 7826 if (sp->fcp->fcp_cntl.cntl_qtype ==
7827 7827 FCP_QTYPE_UNTAGGED) {
7828 7828 /*
7829 7829 * Clear the flag for this LUN so that
7830 7830 * untagged commands can be submitted
7831 7831 * for it.
7832 7832 */
7833 7833 lq->flags &= ~LQF_UNTAGGED_PENDING;
7834 7834 }
7835 7835
7836 7836 if (lq->lun_outcnt != 0) {
7837 7837 lq->lun_outcnt--;
7838 7838 }
7839 7839 }
7840 7840
7841 7841 /* Reset port down retry count on good completion. */
7842 7842 if (sp->pkt->pkt_reason == CS_COMPLETE) {
7843 7843 tq->port_down_retry_count =
7844 7844 ha->port_down_retry_count;
7845 7845 tq->qfull_retry_count = ha->qfull_retry_count;
7846 7846 }
7847 7847
7848 7848
7849 7849 /* Alter aborted status for fast timeout feature */
7850 7850 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7851 7851 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7852 7852 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7853 7853 sp->flags & SRB_RETRY &&
7854 7854 (sp->flags & SRB_WATCHDOG_ENABLED &&
7855 7855 sp->wdg_q_time > 1)) {
7856 7856 EL(ha, "fast abort modify change\n");
7857 7857 sp->flags &= ~(SRB_RETRY);
7858 7858 sp->pkt->pkt_reason = CS_TIMEOUT;
7859 7859 }
7860 7860
7861 7861 /* Place request back on top of target command queue */
7862 7862 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7863 7863 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7864 7864 sp->flags & SRB_RETRY &&
7865 7865 (sp->flags & SRB_WATCHDOG_ENABLED &&
7866 7866 sp->wdg_q_time > 1)) {
7867 7867 sp->flags &= ~(SRB_ISP_STARTED |
7868 7868 SRB_ISP_COMPLETED | SRB_RETRY);
7869 7869
7870 7870 /* Reset watchdog timer */
7871 7871 sp->wdg_q_time = sp->init_wdg_q_time;
7872 7872
7873 7873 /* Issue marker command on reset status. */
7874 7874 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7875 7875 (sp->pkt->pkt_reason == CS_RESET ||
7876 7876 (CFG_IST(ha, CFG_CTRL_24258081) &&
7877 7877 sp->pkt->pkt_reason == CS_ABORTED))) {
7878 7878 (void) ql_marker(ha, tq->loop_id, 0,
7879 7879 MK_SYNC_ID);
7880 7880 }
7881 7881
7882 7882 ql_add_link_t(&lq->cmd, &sp->cmd);
7883 7883 sp->flags |= SRB_IN_DEVICE_QUEUE;
7884 7884 ql_next(ha, lq);
7885 7885 } else {
7886 7886 /* Remove command from watchdog queue. */
7887 7887 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7888 7888 ql_remove_link(&tq->wdg, &sp->wdg);
7889 7889 sp->flags &= ~SRB_WATCHDOG_ENABLED;
7890 7890 }
7891 7891
7892 7892 if (lq->cmd.first != NULL) {
7893 7893 ql_next(ha, lq);
7894 7894 } else {
7895 7895 /* Release LU queue specific lock. */
7896 7896 DEVICE_QUEUE_UNLOCK(tq);
7897 7897 if (ha->pha->pending_cmds.first !=
7898 7898 NULL) {
7899 7899 ql_start_iocb(ha, NULL);
7900 7900 }
7901 7901 }
7902 7902
7903 7903 /* Sync buffers if required. */
7904 7904 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7905 7905 (void) ddi_dma_sync(
7906 7906 sp->pkt->pkt_resp_dma,
7907 7907 0, 0, DDI_DMA_SYNC_FORCPU);
7908 7908 }
7909 7909
7910 7910 /* Map ISP completion codes. */
7911 7911 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7912 7912 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7913 7913 switch (sp->pkt->pkt_reason) {
7914 7914 case CS_COMPLETE:
7915 7915 sp->pkt->pkt_state = FC_PKT_SUCCESS;
7916 7916 break;
7917 7917 case CS_RESET:
7918 7918 /* Issue marker command. */
7919 7919 if (!(ha->task_daemon_flags &
7920 7920 LOOP_DOWN)) {
7921 7921 (void) ql_marker(ha,
7922 7922 tq->loop_id, 0,
7923 7923 MK_SYNC_ID);
7924 7924 }
7925 7925 sp->pkt->pkt_state =
7926 7926 FC_PKT_PORT_OFFLINE;
7927 7927 sp->pkt->pkt_reason =
7928 7928 FC_REASON_ABORTED;
7929 7929 break;
7930 7930 case CS_RESOUCE_UNAVAILABLE:
7931 7931 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7932 7932 sp->pkt->pkt_reason =
7933 7933 FC_REASON_PKT_BUSY;
7934 7934 break;
7935 7935
7936 7936 case CS_TIMEOUT:
7937 7937 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7938 7938 sp->pkt->pkt_reason =
7939 7939 FC_REASON_HW_ERROR;
7940 7940 break;
7941 7941 case CS_DATA_OVERRUN:
7942 7942 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7943 7943 sp->pkt->pkt_reason =
7944 7944 FC_REASON_OVERRUN;
7945 7945 break;
7946 7946 case CS_PORT_UNAVAILABLE:
7947 7947 case CS_PORT_LOGGED_OUT:
7948 7948 sp->pkt->pkt_state =
7949 7949 FC_PKT_PORT_OFFLINE;
7950 7950 sp->pkt->pkt_reason =
7951 7951 FC_REASON_LOGIN_REQUIRED;
7952 7952 ql_send_logo(ha, tq, NULL);
7953 7953 break;
7954 7954 case CS_PORT_CONFIG_CHG:
7955 7955 sp->pkt->pkt_state =
7956 7956 FC_PKT_PORT_OFFLINE;
7957 7957 sp->pkt->pkt_reason =
7958 7958 FC_REASON_OFFLINE;
7959 7959 break;
7960 7960 case CS_QUEUE_FULL:
7961 7961 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7962 7962 sp->pkt->pkt_reason = FC_REASON_QFULL;
7963 7963 break;
7964 7964
7965 7965 case CS_ABORTED:
7966 7966 DEVICE_QUEUE_LOCK(tq);
7967 7967 if (tq->flags & (TQF_RSCN_RCVD |
7968 7968 TQF_NEED_AUTHENTICATION)) {
7969 7969 sp->pkt->pkt_state =
7970 7970 FC_PKT_PORT_OFFLINE;
7971 7971 sp->pkt->pkt_reason =
7972 7972 FC_REASON_LOGIN_REQUIRED;
7973 7973 } else {
7974 7974 sp->pkt->pkt_state =
7975 7975 FC_PKT_LOCAL_RJT;
7976 7976 sp->pkt->pkt_reason =
7977 7977 FC_REASON_ABORTED;
7978 7978 }
7979 7979 DEVICE_QUEUE_UNLOCK(tq);
7980 7980 break;
7981 7981
7982 7982 case CS_TRANSPORT:
7983 7983 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7984 7984 sp->pkt->pkt_reason =
7985 7985 FC_PKT_TRAN_ERROR;
7986 7986 break;
7987 7987
7988 7988 case CS_DATA_UNDERRUN:
7989 7989 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7990 7990 sp->pkt->pkt_reason =
7991 7991 FC_REASON_UNDERRUN;
7992 7992 break;
7993 7993 case CS_DMA_ERROR:
7994 7994 case CS_BAD_PAYLOAD:
7995 7995 case CS_UNKNOWN:
7996 7996 case CS_CMD_FAILED:
7997 7997 default:
7998 7998 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7999 7999 sp->pkt->pkt_reason =
8000 8000 FC_REASON_HW_ERROR;
8001 8001 break;
8002 8002 }
8003 8003
8004 8004 /* Now call the pkt completion callback */
8005 8005 if (sp->flags & SRB_POLL) {
8006 8006 sp->flags &= ~SRB_POLL;
8007 8007 } else if (sp->pkt->pkt_comp) {
8008 8008 if (sp->pkt->pkt_tran_flags &
8009 8009 FC_TRAN_IMMEDIATE_CB) {
8010 8010 (*sp->pkt->pkt_comp)(sp->pkt);
8011 8011 } else {
8012 8012 ql_awaken_task_daemon(ha, sp,
8013 8013 0, 0);
8014 8014 }
8015 8015 }
8016 8016 }
8017 8017 }
8018 8018 }
8019 8019
8020 8020 QL_PRINT_3(CE_CONT, "done\n");
8021 8021 }
8022 8022
8023 8023 /*
8024 8024 * ql_awaken_task_daemon
8025 8025 * Adds command completion callback to callback queue and/or
8026 8026 * awakens task daemon thread.
8027 8027 *
8028 8028 * Input:
8029 8029 * ha: adapter state pointer.
8030 8030 * sp: srb pointer.
8031 8031 * set_flags: task daemon flags to set.
8032 8032 * reset_flags: task daemon flags to reset.
8033 8033 *
8034 8034 * Context:
8035 8035 * Interrupt or Kernel context, no mailbox commands allowed.
8036 8036 */
8037 8037 void
8038 8038 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
8039 8039 uint32_t set_flags, uint32_t reset_flags)
8040 8040 {
8041 8041 ql_adapter_state_t *ha = vha->pha;
8042 8042
8043 8043 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8044 8044
8045 8045 /* Acquire task daemon lock. */
8046 8046 TASK_DAEMON_LOCK(ha);
8047 8047
8048 8048 if (set_flags & ISP_ABORT_NEEDED) {
8049 8049 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8050 8050 set_flags &= ~ISP_ABORT_NEEDED;
8051 8051 }
8052 8052 }
8053 8053
8054 8054 ha->task_daemon_flags |= set_flags;
8055 8055 ha->task_daemon_flags &= ~reset_flags;
8056 8056
8057 8057 if (QL_DAEMON_SUSPENDED(ha)) {
8058 8058 if (sp != NULL) {
8059 8059 TASK_DAEMON_UNLOCK(ha);
8060 8060
8061 8061 /* Do callback. */
8062 8062 if (sp->flags & SRB_UB_CALLBACK) {
8063 8063 ql_unsol_callback(sp);
8064 8064 } else {
8065 8065 (*sp->pkt->pkt_comp)(sp->pkt);
8066 8066 }
8067 8067 } else {
8068 8068 if (!(curthread->t_flag & T_INTR_THREAD) &&
8069 8069 !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8070 8070 ha->task_daemon_flags |= TASK_THREAD_CALLED;
8071 8071 ql_task_thread(ha);
8072 8072 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8073 8073 }
8074 8074
8075 8075 TASK_DAEMON_UNLOCK(ha);
8076 8076 }
8077 8077 } else {
8078 8078 if (sp != NULL) {
8079 8079 ql_add_link_b(&ha->callback_queue, &sp->cmd);
8080 8080 }
8081 8081
8082 8082 if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8083 8083 cv_broadcast(&ha->cv_task_daemon);
8084 8084 }
8085 8085 TASK_DAEMON_UNLOCK(ha);
8086 8086 }
8087 8087
8088 8088 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8089 8089 }
8090 8090
8091 8091 /*
8092 8092 * ql_task_daemon
8093 8093 * Thread that is awaken by the driver when a
8094 8094 * background needs to be done.
8095 8095 *
8096 8096 * Input:
8097 8097 * arg = adapter state pointer.
8098 8098 *
8099 8099 * Context:
8100 8100 * Kernel context.
8101 8101 */
8102 8102 static void
8103 8103 ql_task_daemon(void *arg)
8104 8104 {
8105 8105 ql_adapter_state_t *ha = (void *)arg;
8106 8106
8107 8107 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8108 8108
8109 8109 CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8110 8110 "ql_task_daemon");
8111 8111
8112 8112 /* Acquire task daemon lock. */
8113 8113 TASK_DAEMON_LOCK(ha);
8114 8114
8115 8115 ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
8116 8116
8117 8117 while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8118 8118 ql_task_thread(ha);
8119 8119
8120 8120 QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8121 8121
8122 8122 /*
8123 8123 * Before we wait on the conditional variable, we
8124 8124 * need to check if STOP_FLG is set for us to terminate
8125 8125 */
8126 8126 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8127 8127 break;
8128 8128 }
8129 8129
8130 8130 /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8131 8131 CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8132 8132
8133 8133 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8134 8134
8135 8135 /* If killed, stop task daemon */
8136 8136 if (cv_wait_sig(&ha->cv_task_daemon,
8137 8137 &ha->task_daemon_mutex) == 0) {
8138 8138 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8139 8139 }
8140 8140
8141 8141 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8142 8142
8143 8143 /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8144 8144 CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8145 8145
8146 8146 QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8147 8147 }
8148 8148
8149 8149 ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8150 8150 TASK_DAEMON_ALIVE_FLG);
8151 8151
8152 8152 /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8153 8153 CALLB_CPR_EXIT(&ha->cprinfo);
8154 8154
8155 8155 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8156 8156
8157 8157 thread_exit();
8158 8158 }
8159 8159
8160 8160 /*
8161 8161 * ql_task_thread
8162 8162 * Thread run by daemon.
8163 8163 *
8164 8164 * Input:
8165 8165 * ha = adapter state pointer.
8166 8166 * TASK_DAEMON_LOCK must be acquired prior to call.
8167 8167 *
8168 8168 * Context:
8169 8169 * Kernel context.
8170 8170 */
8171 8171 static void
8172 8172 ql_task_thread(ql_adapter_state_t *ha)
8173 8173 {
8174 8174 int loop_again;
8175 8175 ql_srb_t *sp;
8176 8176 ql_head_t *head;
8177 8177 ql_link_t *link;
8178 8178 caddr_t msg;
8179 8179 ql_adapter_state_t *vha;
8180 8180
8181 8181 do {
8182 8182 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8183 8183 ha->instance, ha->task_daemon_flags);
8184 8184
8185 8185 loop_again = FALSE;
8186 8186
8187 8187 QL_PM_LOCK(ha);
8188 8188 if (ha->power_level != PM_LEVEL_D0) {
8189 8189 QL_PM_UNLOCK(ha);
8190 8190 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8191 8191 break;
8192 8192 }
8193 8193 QL_PM_UNLOCK(ha);
8194 8194
8195 8195 /* IDC event. */
8196 8196 if (ha->task_daemon_flags & IDC_EVENT) {
8197 8197 ha->task_daemon_flags &= ~IDC_EVENT;
8198 8198 TASK_DAEMON_UNLOCK(ha);
8199 8199 ql_process_idc_event(ha);
8200 8200 TASK_DAEMON_LOCK(ha);
8201 8201 loop_again = TRUE;
8202 8202 }
8203 8203
8204 8204 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8205 8205 (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8206 8206 (ha->flags & ONLINE) == 0) {
8207 8207 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8208 8208 break;
8209 8209 }
8210 8210 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
8211 8211
8212 8212 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8213 8213 TASK_DAEMON_UNLOCK(ha);
8214 8214 if (ha->log_parity_pause == B_TRUE) {
8215 8215 (void) ql_flash_errlog(ha,
8216 8216 FLASH_ERRLOG_PARITY_ERR, 0,
8217 8217 MSW(ha->parity_stat_err),
8218 8218 LSW(ha->parity_stat_err));
8219 8219 ha->log_parity_pause = B_FALSE;
8220 8220 }
8221 8221 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8222 8222 TASK_DAEMON_LOCK(ha);
8223 8223 loop_again = TRUE;
8224 8224 }
8225 8225
8226 8226 /* Idle Check. */
8227 8227 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8228 8228 ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8229 8229 if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8230 8230 TASK_DAEMON_UNLOCK(ha);
8231 8231 ql_idle_check(ha);
8232 8232 TASK_DAEMON_LOCK(ha);
8233 8233 loop_again = TRUE;
8234 8234 }
8235 8235 }
8236 8236
8237 8237 /* Crystal+ port#0 bypass transition */
8238 8238 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8239 8239 ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8240 8240 TASK_DAEMON_UNLOCK(ha);
8241 8241 (void) ql_initiate_lip(ha);
8242 8242 TASK_DAEMON_LOCK(ha);
8243 8243 loop_again = TRUE;
8244 8244 }
8245 8245
8246 8246 /* Abort queues needed. */
8247 8247 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8248 8248 ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8249 8249 TASK_DAEMON_UNLOCK(ha);
8250 8250 ql_abort_queues(ha);
8251 8251 TASK_DAEMON_LOCK(ha);
8252 8252 }
8253 8253
8254 8254 /* Not suspended, awaken waiting routines. */
8255 8255 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8256 8256 ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8257 8257 ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8258 8258 cv_broadcast(&ha->cv_dr_suspended);
8259 8259 loop_again = TRUE;
8260 8260 }
8261 8261
8262 8262 /* Handle RSCN changes. */
8263 8263 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8264 8264 if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8265 8265 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8266 8266 TASK_DAEMON_UNLOCK(ha);
8267 8267 (void) ql_handle_rscn_update(vha);
8268 8268 TASK_DAEMON_LOCK(ha);
8269 8269 loop_again = TRUE;
8270 8270 }
8271 8271 }
8272 8272
8273 8273 /* Handle state changes. */
8274 8274 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8275 8275 if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8276 8276 !(ha->task_daemon_flags &
8277 8277 TASK_DAEMON_POWERING_DOWN)) {
8278 8278 /* Report state change. */
8279 8279 EL(vha, "state change = %xh\n", vha->state);
8280 8280 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8281 8281
8282 8282 if (vha->task_daemon_flags &
8283 8283 COMMAND_WAIT_NEEDED) {
8284 8284 vha->task_daemon_flags &=
8285 8285 ~COMMAND_WAIT_NEEDED;
8286 8286 if (!(ha->task_daemon_flags &
8287 8287 COMMAND_WAIT_ACTIVE)) {
8288 8288 ha->task_daemon_flags |=
8289 8289 COMMAND_WAIT_ACTIVE;
8290 8290 TASK_DAEMON_UNLOCK(ha);
8291 8291 ql_cmd_wait(ha);
8292 8292 TASK_DAEMON_LOCK(ha);
8293 8293 ha->task_daemon_flags &=
8294 8294 ~COMMAND_WAIT_ACTIVE;
8295 8295 }
8296 8296 }
8297 8297
8298 8298 msg = NULL;
8299 8299 if (FC_PORT_STATE_MASK(vha->state) ==
8300 8300 FC_STATE_OFFLINE) {
8301 8301 if (vha->task_daemon_flags &
8302 8302 STATE_ONLINE) {
8303 8303 if (ha->topology &
8304 8304 QL_LOOP_CONNECTION) {
8305 8305 msg = "Loop OFFLINE";
8306 8306 } else {
8307 8307 msg = "Link OFFLINE";
8308 8308 }
8309 8309 }
8310 8310 vha->task_daemon_flags &=
8311 8311 ~STATE_ONLINE;
8312 8312 } else if (FC_PORT_STATE_MASK(vha->state) ==
8313 8313 FC_STATE_LOOP) {
8314 8314 if (!(vha->task_daemon_flags &
8315 8315 STATE_ONLINE)) {
8316 8316 msg = "Loop ONLINE";
8317 8317 }
8318 8318 vha->task_daemon_flags |= STATE_ONLINE;
8319 8319 } else if (FC_PORT_STATE_MASK(vha->state) ==
8320 8320 FC_STATE_ONLINE) {
8321 8321 if (!(vha->task_daemon_flags &
8322 8322 STATE_ONLINE)) {
8323 8323 msg = "Link ONLINE";
8324 8324 }
8325 8325 vha->task_daemon_flags |= STATE_ONLINE;
8326 8326 } else {
8327 8327 msg = "Unknown Link state";
8328 8328 }
8329 8329
8330 8330 if (msg != NULL) {
8331 8331 cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8332 8332 "%s", QL_NAME, ha->instance,
8333 8333 vha->vp_index, msg);
8334 8334 }
8335 8335
8336 8336 if (vha->flags & FCA_BOUND) {
8337 8337 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8338 8338 "cb state=%xh\n", ha->instance,
8339 8339 vha->vp_index, vha->state);
8340 8340 TASK_DAEMON_UNLOCK(ha);
8341 8341 (vha->bind_info.port_statec_cb)
8342 8342 (vha->bind_info.port_handle,
8343 8343 vha->state);
8344 8344 TASK_DAEMON_LOCK(ha);
8345 8345 }
8346 8346 loop_again = TRUE;
8347 8347 }
8348 8348 }
8349 8349
8350 8350 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8351 8351 !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8352 8352 EL(ha, "processing LIP reset\n");
8353 8353 ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8354 8354 TASK_DAEMON_UNLOCK(ha);
8355 8355 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8356 8356 if (vha->flags & FCA_BOUND) {
8357 8357 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8358 8358 "cb reset\n", ha->instance,
8359 8359 vha->vp_index);
8360 8360 (vha->bind_info.port_statec_cb)
8361 8361 (vha->bind_info.port_handle,
8362 8362 FC_STATE_TARGET_PORT_RESET);
8363 8363 }
8364 8364 }
8365 8365 TASK_DAEMON_LOCK(ha);
8366 8366 loop_again = TRUE;
8367 8367 }
8368 8368
8369 8369 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8370 8370 FIRMWARE_UP)) {
8371 8371 /*
8372 8372 * The firmware needs more unsolicited
8373 8373 * buffers. We cannot allocate any new
8374 8374 * buffers unless the ULP module requests
8375 8375 * for new buffers. All we can do here is
8376 8376 * to give received buffers from the pool
8377 8377 * that is already allocated
8378 8378 */
8379 8379 ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8380 8380 TASK_DAEMON_UNLOCK(ha);
8381 8381 ql_isp_rcvbuf(ha);
8382 8382 TASK_DAEMON_LOCK(ha);
8383 8383 loop_again = TRUE;
8384 8384 }
8385 8385
8386 8386 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8387 8387 TASK_DAEMON_UNLOCK(ha);
8388 8388 (void) ql_abort_isp(ha);
8389 8389 TASK_DAEMON_LOCK(ha);
8390 8390 loop_again = TRUE;
8391 8391 }
8392 8392
8393 8393 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8394 8394 COMMAND_WAIT_NEEDED))) {
8395 8395 if (QL_IS_SET(ha->task_daemon_flags,
8396 8396 RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8397 8397 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8398 8398 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8399 8399 ha->task_daemon_flags |= RESET_ACTIVE;
8400 8400 TASK_DAEMON_UNLOCK(ha);
8401 8401 for (vha = ha; vha != NULL;
8402 8402 vha = vha->vp_next) {
8403 8403 ql_rst_aen(vha);
8404 8404 }
8405 8405 TASK_DAEMON_LOCK(ha);
8406 8406 ha->task_daemon_flags &= ~RESET_ACTIVE;
8407 8407 loop_again = TRUE;
8408 8408 }
8409 8409 }
8410 8410
8411 8411 if (QL_IS_SET(ha->task_daemon_flags,
8412 8412 LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8413 8413 if (!(ha->task_daemon_flags &
8414 8414 LOOP_RESYNC_ACTIVE)) {
8415 8415 ha->task_daemon_flags |=
8416 8416 LOOP_RESYNC_ACTIVE;
8417 8417 TASK_DAEMON_UNLOCK(ha);
8418 8418 (void) ql_loop_resync(ha);
8419 8419 TASK_DAEMON_LOCK(ha);
8420 8420 loop_again = TRUE;
8421 8421 }
8422 8422 }
8423 8423 }
8424 8424
8425 8425 /* Port retry needed. */
8426 8426 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8427 8427 ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8428 8428 ADAPTER_STATE_LOCK(ha);
8429 8429 ha->port_retry_timer = 0;
8430 8430 ADAPTER_STATE_UNLOCK(ha);
8431 8431
8432 8432 TASK_DAEMON_UNLOCK(ha);
8433 8433 ql_restart_queues(ha);
8434 8434 TASK_DAEMON_LOCK(ha);
8435 8435 loop_again = B_TRUE;
8436 8436 }
8437 8437
8438 8438 /* iiDMA setting needed? */
8439 8439 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8440 8440 ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8441 8441
8442 8442 TASK_DAEMON_UNLOCK(ha);
8443 8443 ql_iidma(ha);
8444 8444 TASK_DAEMON_LOCK(ha);
8445 8445 loop_again = B_TRUE;
8446 8446 }
8447 8447
8448 8448 if (ha->task_daemon_flags & SEND_PLOGI) {
8449 8449 ha->task_daemon_flags &= ~SEND_PLOGI;
8450 8450 TASK_DAEMON_UNLOCK(ha);
8451 8451 (void) ql_n_port_plogi(ha);
8452 8452 TASK_DAEMON_LOCK(ha);
8453 8453 }
8454 8454
8455 8455 head = &ha->callback_queue;
8456 8456 if (head->first != NULL) {
8457 8457 sp = head->first->base_address;
8458 8458 link = &sp->cmd;
8459 8459
8460 8460 /* Dequeue command. */
8461 8461 ql_remove_link(head, link);
8462 8462
8463 8463 /* Release task daemon lock. */
8464 8464 TASK_DAEMON_UNLOCK(ha);
8465 8465
8466 8466 /* Do callback. */
8467 8467 if (sp->flags & SRB_UB_CALLBACK) {
8468 8468 ql_unsol_callback(sp);
8469 8469 } else {
8470 8470 (*sp->pkt->pkt_comp)(sp->pkt);
8471 8471 }
8472 8472
8473 8473 /* Acquire task daemon lock. */
8474 8474 TASK_DAEMON_LOCK(ha);
8475 8475
8476 8476 loop_again = TRUE;
8477 8477 }
8478 8478
8479 8479 } while (loop_again);
8480 8480 }
8481 8481
8482 8482 /*
8483 8483 * ql_idle_check
8484 8484 * Test for adapter is alive and well.
8485 8485 *
8486 8486 * Input:
8487 8487 * ha: adapter state pointer.
8488 8488 *
8489 8489 * Context:
8490 8490 * Kernel context.
8491 8491 */
8492 8492 static void
8493 8493 ql_idle_check(ql_adapter_state_t *ha)
8494 8494 {
8495 8495 ddi_devstate_t state;
8496 8496 int rval;
8497 8497 ql_mbx_data_t mr;
8498 8498
8499 8499 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8500 8500
8501 8501 /* Firmware Ready Test. */
8502 8502 rval = ql_get_firmware_state(ha, &mr);
8503 8503 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8504 8504 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
8505 8505 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8506 8506 state = ddi_get_devstate(ha->dip);
8507 8507 if (state == DDI_DEVSTATE_UP) {
8508 8508 /*EMPTY*/
8509 8509 ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8510 8510 DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8511 8511 }
8512 8512 TASK_DAEMON_LOCK(ha);
8513 8513 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8514 8514 EL(ha, "fstate_ready, isp_abort_needed\n");
8515 8515 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8516 8516 }
8517 8517 TASK_DAEMON_UNLOCK(ha);
8518 8518 }
8519 8519
8520 8520 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8521 8521 }
8522 8522
8523 8523 /*
8524 8524 * ql_unsol_callback
8525 8525 * Handle unsolicited buffer callbacks.
8526 8526 *
8527 8527 * Input:
8528 8528 * ha = adapter state pointer.
8529 8529 * sp = srb pointer.
8530 8530 *
8531 8531 * Context:
8532 8532 * Kernel context.
8533 8533 */
8534 8534 static void
8535 8535 ql_unsol_callback(ql_srb_t *sp)
8536 8536 {
8537 8537 fc_affected_id_t *af;
8538 8538 fc_unsol_buf_t *ubp;
8539 8539 uchar_t r_ctl;
8540 8540 uchar_t ls_code;
8541 8541 ql_tgt_t *tq;
8542 8542 ql_adapter_state_t *ha = sp->ha, *pha = sp->ha->pha;
8543 8543
8544 8544 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8545 8545
8546 8546 ubp = ha->ub_array[sp->handle];
8547 8547 r_ctl = ubp->ub_frame.r_ctl;
8548 8548 ls_code = ubp->ub_buffer[0];
8549 8549
8550 8550 if (sp->lun_queue == NULL) {
8551 8551 tq = NULL;
8552 8552 } else {
8553 8553 tq = sp->lun_queue->target_queue;
8554 8554 }
8555 8555
8556 8556 QL_UB_LOCK(ha);
8557 8557 if (sp->flags & SRB_UB_FREE_REQUESTED ||
8558 8558 pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8559 8559 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8560 8560 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8561 8561 sp->flags |= SRB_UB_IN_FCA;
8562 8562 QL_UB_UNLOCK(ha);
8563 8563 return;
8564 8564 }
8565 8565
8566 8566 /* Process RSCN */
8567 8567 if (sp->flags & SRB_UB_RSCN) {
8568 8568 int sendup = 1;
8569 8569
8570 8570 /*
8571 8571 * Defer RSCN posting until commands return
8572 8572 */
8573 8573 QL_UB_UNLOCK(ha);
8574 8574
8575 8575 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8576 8576
8577 8577 /* Abort outstanding commands */
8578 8578 sendup = ql_process_rscn(ha, af);
8579 8579 if (sendup == 0) {
8580 8580
8581 8581 TASK_DAEMON_LOCK(ha);
8582 8582 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8583 8583 TASK_DAEMON_UNLOCK(ha);
8584 8584
8585 8585 /*
8586 8586 * Wait for commands to drain in F/W (doesn't take
8587 8587 * more than a few milliseconds)
8588 8588 */
8589 8589 ql_delay(ha, 10000);
8590 8590
8591 8591 QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8592 8592 "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8593 8593 af->aff_format, af->aff_d_id);
8594 8594 return;
8595 8595 }
8596 8596
8597 8597 QL_UB_LOCK(ha);
8598 8598
8599 8599 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8600 8600 af->aff_format, af->aff_d_id);
8601 8601 }
8602 8602
8603 8603 /* Process UNSOL LOGO */
8604 8604 if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8605 8605 QL_UB_UNLOCK(ha);
8606 8606
8607 8607 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8608 8608 TASK_DAEMON_LOCK(ha);
8609 8609 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8610 8610 TASK_DAEMON_UNLOCK(ha);
8611 8611 QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8612 8612 "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8613 8613 return;
8614 8614 }
8615 8615
8616 8616 QL_UB_LOCK(ha);
8617 8617 EL(ha, "sending unsol logout for %xh to transport\n",
8618 8618 ubp->ub_frame.s_id);
8619 8619 }
8620 8620
8621 8621 sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8622 8622 SRB_UB_FCP);
8623 8623
8624 8624 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8625 8625 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8626 8626 ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8627 8627 }
8628 8628 QL_UB_UNLOCK(ha);
8629 8629
8630 8630 (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8631 8631 ubp, sp->ub_type);
8632 8632
8633 8633 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8634 8634 }
8635 8635
/*
 * ql_send_logo
 *	Simulates receipt of a LOGO ELS from the specified target by
 *	building an unsolicited buffer and queueing it to the transport.
 *
 * Input:
 *	vha:	adapter state pointer (physical or virtual port).
 *	tq:	target queue pointer.
 *	done_q:	done queue pointer; if NULL the task daemon is awakened
 *		to deliver the buffer instead.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
{
	fc_unsol_buf_t		*ubp;
	ql_srb_t		*sp;
	la_els_logo_t		*payload;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
	    tq->d_id.b24);

	/* Ignore unassigned and broadcast d_ids. */
	if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
		EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
		return;
	}

	/*
	 * Only emulate the LOGO when the target is not in RSCN/PLOGI
	 * processing, no logout has been sent yet and the loop is up.
	 */
	if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
	    tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {

		/* Locate a buffer to use. */
		ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
		if (ubp == NULL) {
			EL(vha, "Failed, get_unsolicited_buffer\n");
			return;
		}

		/* Force re-authentication before further I/O to the target. */
		DEVICE_QUEUE_LOCK(tq);
		tq->flags |= TQF_NEED_AUTHENTICATION;
		tq->logout_sent++;
		DEVICE_QUEUE_UNLOCK(tq);

		EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);

		sp = ubp->ub_fca_private;

		/* Set header. */
		ubp->ub_frame.d_id = vha->d_id.b24;
		ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
		    F_CTL_SEQ_INITIATIVE;
		ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
		ubp->ub_frame.seq_cnt = 0;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = 0;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;

		/* set payload. */
		payload = (la_els_logo_t *)ubp->ub_buffer;
		bzero(payload, sizeof (la_els_logo_t));
		/* Make sure ls_code in payload is always big endian */
		ubp->ub_buffer[0] = LA_ELS_LOGO;
		ubp->ub_buffer[1] = 0;
		ubp->ub_buffer[2] = 0;
		ubp->ub_buffer[3] = 0;
		bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
		    &payload->nport_ww_name.raw_wwn[0], 8);
		payload->nport_id.port_id = tq->d_id.b24;

		QL_UB_LOCK(ha);
		sp->flags |= SRB_UB_CALLBACK;
		QL_UB_UNLOCK(ha);
		/* Attach an existing LUN queue, or create one if needed. */
		if (tq->lun_queues.first != NULL) {
			sp->lun_queue = (tq->lun_queues.first)->base_address;
		} else {
			sp->lun_queue = ql_lun_queue(vha, tq, 0);
		}
		/* Queue for completion or hand off to the task daemon. */
		if (done_q) {
			ql_add_link_b(done_q, &sp->cmd);
		} else {
			ql_awaken_task_daemon(ha, sp, 0, 0);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
8725 8725
8726 8726 static int
8727 8727 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
8728 8728 {
8729 8729 port_id_t d_id;
8730 8730 ql_srb_t *sp;
8731 8731 ql_link_t *link;
8732 8732 int sendup = 1;
8733 8733
8734 8734 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8735 8735
8736 8736 DEVICE_QUEUE_LOCK(tq);
8737 8737 if (tq->outcnt) {
8738 8738 DEVICE_QUEUE_UNLOCK(tq);
8739 8739 sendup = 0;
8740 8740 (void) ql_abort_device(ha, tq, 1);
8741 8741 ql_delay(ha, 10000);
8742 8742 } else {
8743 8743 DEVICE_QUEUE_UNLOCK(tq);
8744 8744 TASK_DAEMON_LOCK(ha);
8745 8745
8746 8746 for (link = ha->pha->callback_queue.first; link != NULL;
8747 8747 link = link->next) {
8748 8748 sp = link->base_address;
8749 8749 if (sp->flags & SRB_UB_CALLBACK) {
8750 8750 continue;
8751 8751 }
8752 8752 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8753 8753
8754 8754 if (tq->d_id.b24 == d_id.b24) {
8755 8755 sendup = 0;
8756 8756 break;
8757 8757 }
8758 8758 }
8759 8759
8760 8760 TASK_DAEMON_UNLOCK(ha);
8761 8761 }
8762 8762
8763 8763 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8764 8764
8765 8765 return (sendup);
8766 8766 }
8767 8767
8768 8768 static int
8769 8769 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8770 8770 {
8771 8771 fc_unsol_buf_t *ubp;
8772 8772 ql_srb_t *sp;
8773 8773 la_els_logi_t *payload;
8774 8774 class_svc_param_t *class3_param;
8775 8775
8776 8776 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8777 8777
8778 8778 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8779 8779 LOOP_DOWN)) {
8780 8780 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8781 8781 return (QL_FUNCTION_FAILED);
8782 8782 }
8783 8783
8784 8784 /* Locate a buffer to use. */
8785 8785 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8786 8786 if (ubp == NULL) {
8787 8787 EL(ha, "Failed\n");
8788 8788 return (QL_FUNCTION_FAILED);
8789 8789 }
8790 8790
8791 8791 QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8792 8792 ha->instance, tq->d_id.b24);
8793 8793
8794 8794 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8795 8795
8796 8796 sp = ubp->ub_fca_private;
8797 8797
8798 8798 /* Set header. */
8799 8799 ubp->ub_frame.d_id = ha->d_id.b24;
8800 8800 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8801 8801 ubp->ub_frame.s_id = tq->d_id.b24;
8802 8802 ubp->ub_frame.rsvd = 0;
8803 8803 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8804 8804 F_CTL_SEQ_INITIATIVE;
8805 8805 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8806 8806 ubp->ub_frame.seq_cnt = 0;
8807 8807 ubp->ub_frame.df_ctl = 0;
8808 8808 ubp->ub_frame.seq_id = 0;
8809 8809 ubp->ub_frame.rx_id = 0xffff;
8810 8810 ubp->ub_frame.ox_id = 0xffff;
8811 8811
8812 8812 /* set payload. */
8813 8813 payload = (la_els_logi_t *)ubp->ub_buffer;
8814 8814 bzero(payload, sizeof (payload));
8815 8815
8816 8816 payload->ls_code.ls_code = LA_ELS_PLOGI;
8817 8817 payload->common_service.fcph_version = 0x2006;
8818 8818 payload->common_service.cmn_features = 0x8800;
8819 8819
8820 8820 CFG_IST(ha, CFG_CTRL_24258081) ?
8821 8821 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8822 8822 ha->init_ctrl_blk.cb24.max_frame_length[0],
8823 8823 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8824 8824 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8825 8825 ha->init_ctrl_blk.cb.max_frame_length[0],
8826 8826 ha->init_ctrl_blk.cb.max_frame_length[1]));
8827 8827
8828 8828 payload->common_service.conc_sequences = 0xff;
8829 8829 payload->common_service.relative_offset = 0x03;
8830 8830 payload->common_service.e_d_tov = 0x7d0;
8831 8831
8832 8832 bcopy((void *)&tq->port_name[0],
8833 8833 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8834 8834
8835 8835 bcopy((void *)&tq->node_name[0],
8836 8836 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8837 8837
8838 8838 class3_param = (class_svc_param_t *)&payload->class_3;
8839 8839 class3_param->class_valid_svc_opt = 0x8000;
8840 8840 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8841 8841 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8842 8842 class3_param->conc_sequences = tq->class3_conc_sequences;
8843 8843 class3_param->open_sequences_per_exch =
8844 8844 tq->class3_open_sequences_per_exch;
8845 8845
8846 8846 QL_UB_LOCK(ha);
8847 8847 sp->flags |= SRB_UB_CALLBACK;
8848 8848 QL_UB_UNLOCK(ha);
8849 8849
8850 8850 ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8851 8851
8852 8852 if (done_q) {
8853 8853 ql_add_link_b(done_q, &sp->cmd);
8854 8854 } else {
8855 8855 ql_awaken_task_daemon(ha, sp, 0, 0);
8856 8856 }
8857 8857
8858 8858 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8859 8859
8860 8860 return (QL_SUCCESS);
8861 8861 }
8862 8862
/*
 * ql_abort_device
 *	Aborts outstanding commands in the firmware, clears internally
 *	queued commands in the driver, and synchronizes the target with
 *	the firmware.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	drain:	when non-zero, also abort the target in the firmware.
 *
 * Returns:
 *	ql local function return status code.
 */
int
ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
{
	ql_link_t	*link, *link2;
	ql_lun_t	*lq;
	int		rval = QL_SUCCESS;
	ql_srb_t	*sp;
	ql_head_t	done_q = { NULL, NULL };

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/*
	 * First clear, internally queued commands
	 */
	DEVICE_QUEUE_LOCK(tq);
	for (link = tq->lun_queues.first; link != NULL; link = link->next) {
		lq = link->base_address;

		link2 = lq->cmd.first;
		while (link2 != NULL) {
			sp = link2->base_address;
			/* Advance first; the entry may be unlinked below. */
			link2 = link2->next;

			/* Skip commands already being aborted. */
			if (sp->flags & SRB_ABORT) {
				continue;
			}

			/* Remove srb from device command queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_ABORTED;

			/* Call done routine to handle completions. */
			ql_add_link_b(&done_q, &sp->cmd);
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	/* Complete the aborted commands outside the device queue lock. */
	if (done_q.first != NULL) {
		ql_done(done_q.first);
	}

	/* Optionally abort the target in the firmware as well. */
	if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
		rval = ql_abort_target(ha, tq, 0);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
		    ha->vp_index);
	}

	return (rval);
}
8926 8926
8927 8927 /*
8928 8928 * ql_rcv_rscn_els
8929 8929 * Processes received RSCN extended link service.
8930 8930 *
8931 8931 * Input:
8932 8932 * ha: adapter state pointer.
8933 8933 * mb: array containing input mailbox registers.
8934 8934 * done_q: done queue pointer.
8935 8935 *
8936 8936 * Context:
8937 8937 * Interrupt or Kernel context, no mailbox commands allowed.
8938 8938 */
8939 8939 void
8940 8940 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
8941 8941 {
8942 8942 fc_unsol_buf_t *ubp;
8943 8943 ql_srb_t *sp;
8944 8944 fc_rscn_t *rn;
8945 8945 fc_affected_id_t *af;
8946 8946 port_id_t d_id;
8947 8947
8948 8948 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8949 8949
8950 8950 /* Locate a buffer to use. */
8951 8951 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8952 8952 if (ubp != NULL) {
8953 8953 sp = ubp->ub_fca_private;
8954 8954
8955 8955 /* Set header. */
8956 8956 ubp->ub_frame.d_id = ha->d_id.b24;
8957 8957 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8958 8958 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8959 8959 ubp->ub_frame.rsvd = 0;
8960 8960 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8961 8961 F_CTL_SEQ_INITIATIVE;
8962 8962 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8963 8963 ubp->ub_frame.seq_cnt = 0;
8964 8964 ubp->ub_frame.df_ctl = 0;
8965 8965 ubp->ub_frame.seq_id = 0;
8966 8966 ubp->ub_frame.rx_id = 0xffff;
8967 8967 ubp->ub_frame.ox_id = 0xffff;
8968 8968
8969 8969 /* set payload. */
8970 8970 rn = (fc_rscn_t *)ubp->ub_buffer;
8971 8971 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8972 8972
8973 8973 rn->rscn_code = LA_ELS_RSCN;
8974 8974 rn->rscn_len = 4;
8975 8975 rn->rscn_payload_len = 8;
8976 8976 d_id.b.al_pa = LSB(mb[2]);
8977 8977 d_id.b.area = MSB(mb[2]);
8978 8978 d_id.b.domain = LSB(mb[1]);
8979 8979 af->aff_d_id = d_id.b24;
8980 8980 af->aff_format = MSB(mb[1]);
8981 8981
8982 8982 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8983 8983 af->aff_d_id);
8984 8984
8985 8985 ql_update_rscn(ha, af);
8986 8986
8987 8987 QL_UB_LOCK(ha);
8988 8988 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8989 8989 QL_UB_UNLOCK(ha);
8990 8990 ql_add_link_b(done_q, &sp->cmd);
8991 8991 }
8992 8992
8993 8993 if (ubp == NULL) {
8994 8994 EL(ha, "Failed, get_unsolicited_buffer\n");
8995 8995 } else {
8996 8996 /*EMPTY*/
8997 8997 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8998 8998 }
8999 8999 }
9000 9000
/*
 * ql_update_rscn
 *	Update devices from received RSCN.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	af:	pointer to RSCN data.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: flag only the one matching target queue. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
			DEVICE_QUEUE_LOCK(tq);
			tq->flags |= TQF_RSCN_RCVD;
			DEVICE_QUEUE_UNLOCK(tq);
		}
		QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
		    ha->instance);

		return;
	}

	/*
	 * Fabric/area/domain RSCN: walk every known device and flag
	 * those whose d_id falls inside the affected address range.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				/* Entire fabric; skip reserved loop ids. */
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					EL(ha, "SD_RSCN_RCVD %xh RFA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RAA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
					EL(ha, "SD_RSCN_RCVD %xh RDA\n",
					    tq->d_id.b24);
					DEVICE_QUEUE_LOCK(tq);
					tq->flags |= TQF_RSCN_RCVD;
					DEVICE_QUEUE_UNLOCK(tq);
				}
				break;

			default:
				break;
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9083 9083
/*
 * ql_process_rscn
 *	Runs RSCN recovery on every target affected by the RSCN and
 *	computes whether the RSCN should be sent up to the transport.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	af:	RSCN payload pointer.
 *
 * Returns:
 *	1 when the RSCN may be sent up, 0 when any affected device
 *	deferred it.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
{
	int		sendit;
	int		sendup = 1;
	ql_link_t	*link;
	uint16_t	index;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Single-port RSCN: only the one matching target is processed. */
	if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
		port_id_t d_id;

		d_id.r.rsvd_1 = 0;
		d_id.b24 = af->aff_d_id;

		tq = ql_d_id_to_queue(ha, d_id);
		if (tq) {
			sendup = ql_process_rscn_for_device(ha, tq);
		}

		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

		return (sendup);
	}

	/*
	 * Fabric/area/domain RSCN: process every device in range.
	 * A single deferral (sendit == 0) makes the whole RSCN defer.
	 */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {

			tq = link->base_address;
			if (tq == NULL) {
				continue;
			}

			switch (af->aff_format) {
			case FC_RSCN_FABRIC_ADDRESS:
				if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);
					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_AREA_ADDRESS:
				/* Match on domain + area (upper 16 bits). */
				if ((tq->d_id.b24 & 0xffff00) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			case FC_RSCN_DOMAIN_ADDRESS:
				/* Match on domain only (upper 8 bits). */
				if ((tq->d_id.b24 & 0xff0000) ==
				    af->aff_d_id) {
					sendit = ql_process_rscn_for_device(
					    ha, tq);

					if (sendup) {
						sendup = sendit;
					}
				}
				break;

			default:
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9175 9175
/*
 * ql_process_rscn_for_device
 *	Performs RSCN recovery for one target and decides whether the
 *	RSCN should be sent up to the transport for it.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *
 * Returns:
 *	1 when the RSCN may be sent up, 0 when it is deferred because
 *	commands are still outstanding on the target.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int sendup = 1;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	/*
	 * Let FCP-2 compliant devices continue I/Os
	 * with their low level recoveries.
	 */
	if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
	    (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
		/*
		 * Cause ADISC to go out
		 */
		/* Drop the lock around the port database refresh. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_get_port_database(ha, tq, PDF_NONE);

		DEVICE_QUEUE_LOCK(tq);
		tq->flags &= ~TQF_RSCN_RCVD;

	} else if (tq->loop_id != PORT_NO_LOOP_ID) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}

		/* Drop the lock around the abort. */
		DEVICE_QUEUE_UNLOCK(tq);

		(void) ql_abort_device(ha, tq, 1);

		DEVICE_QUEUE_LOCK(tq);

		/* Defer while commands are still outstanding. */
		if (tq->outcnt) {
			sendup = 0;
		} else {
			tq->flags &= ~TQF_RSCN_RCVD;
		}
	} else {
		/* Not logged in; nothing to recover. */
		tq->flags &= ~TQF_RSCN_RCVD;
	}

	if (sendup) {
		if (tq->d_id.b24 != BROADCAST_ADDR) {
			tq->flags |= TQF_NEED_AUTHENTICATION;
		}
	}

	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (sendup);
}
9243 9243
9244 9244 static int
9245 9245 ql_handle_rscn_update(ql_adapter_state_t *ha)
9246 9246 {
9247 9247 int rval;
9248 9248 ql_tgt_t *tq;
9249 9249 uint16_t index, loop_id;
9250 9250 ql_dev_id_list_t *list;
9251 9251 uint32_t list_size;
9252 9252 port_id_t d_id;
9253 9253 ql_mbx_data_t mr;
9254 9254 ql_head_t done_q = { NULL, NULL };
9255 9255
9256 9256 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9257 9257
9258 9258 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9259 9259 list = kmem_zalloc(list_size, KM_SLEEP);
9260 9260 if (list == NULL) {
9261 9261 rval = QL_MEMORY_ALLOC_FAILED;
9262 9262 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9263 9263 return (rval);
9264 9264 }
9265 9265
9266 9266 /*
9267 9267 * Get data from RISC code d_id list to init each device queue.
9268 9268 */
9269 9269 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9270 9270 if (rval != QL_SUCCESS) {
9271 9271 kmem_free(list, list_size);
9272 9272 EL(ha, "get_id_list failed=%xh\n", rval);
9273 9273 return (rval);
9274 9274 }
9275 9275
9276 9276 /* Acquire adapter state lock. */
9277 9277 ADAPTER_STATE_LOCK(ha);
9278 9278
9279 9279 /* Check for new devices */
9280 9280 for (index = 0; index < mr.mb[1]; index++) {
9281 9281 ql_dev_list(ha, list, index, &d_id, &loop_id);
9282 9282
9283 9283 if (VALID_DEVICE_ID(ha, loop_id)) {
9284 9284 d_id.r.rsvd_1 = 0;
9285 9285
9286 9286 tq = ql_d_id_to_queue(ha, d_id);
9287 9287 if (tq != NULL) {
9288 9288 continue;
9289 9289 }
9290 9290
9291 9291 tq = ql_dev_init(ha, d_id, loop_id);
9292 9292
9293 9293 /* Test for fabric device. */
9294 9294 if (d_id.b.domain != ha->d_id.b.domain ||
9295 9295 d_id.b.area != ha->d_id.b.area) {
9296 9296 tq->flags |= TQF_FABRIC_DEVICE;
9297 9297 }
9298 9298
9299 9299 ADAPTER_STATE_UNLOCK(ha);
9300 9300 if (ql_get_port_database(ha, tq, PDF_NONE) !=
9301 9301 QL_SUCCESS) {
9302 9302 tq->loop_id = PORT_NO_LOOP_ID;
9303 9303 }
9304 9304 ADAPTER_STATE_LOCK(ha);
9305 9305
9306 9306 /*
9307 9307 * Send up a PLOGI about the new device
9308 9308 */
9309 9309 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9310 9310 (void) ql_send_plogi(ha, tq, &done_q);
9311 9311 }
9312 9312 }
9313 9313 }
9314 9314
9315 9315 /* Release adapter state lock. */
9316 9316 ADAPTER_STATE_UNLOCK(ha);
9317 9317
9318 9318 if (done_q.first != NULL) {
9319 9319 ql_done(done_q.first);
9320 9320 }
9321 9321
9322 9322 kmem_free(list, list_size);
9323 9323
9324 9324 if (rval != QL_SUCCESS) {
9325 9325 EL(ha, "failed=%xh\n", rval);
9326 9326 } else {
9327 9327 /*EMPTY*/
9328 9328 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9329 9329 }
9330 9330
9331 9331 return (rval);
9332 9332 }
9333 9333
/*
 * ql_free_unsolicited_buffer
 *	Frees allocated buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	ubp = unsolicited buffer pointer.
 *	ADAPTER_STATE_LOCK must be already obtained.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
{
	ql_srb_t	*sp;
	int		status;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	sp = ubp->ub_fca_private;
	if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
		/* Disconnect IP from system buffers. */
		if (ha->flags & IP_INITIALIZED) {
			/* Drop the state lock around the shutdown call. */
			ADAPTER_STATE_UNLOCK(ha);
			status = ql_shutdown_ip(ha);
			ADAPTER_STATE_LOCK(ha);
			if (status != QL_SUCCESS) {
				cmn_err(CE_WARN,
				    "!Qlogic %s(%d): Failed to shutdown IP",
				    QL_NAME, ha->instance);
				/*
				 * NOTE(review): returning here leaves sp/ubp
				 * allocated and ub_allocated unchanged --
				 * presumably deliberate since the buffer is
				 * still owned by the IP side; confirm.
				 */
				return;
			}

			ha->flags &= ~IP_ENABLED;
		}

		/* IP buffers were allocated as DMA memory. */
		ql_free_phys(ha, &sp->ub_buffer);
	} else {
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
	}

	kmem_free(sp, sizeof (ql_srb_t));
	kmem_free(ubp, sizeof (fc_unsol_buf_t));

	/* Maintain the adapter's count of allocated unsolicited buffers. */
	if (ha->ub_allocated != 0) {
		ha->ub_allocated--;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
9385 9385
9386 9386 /*
9387 9387 * ql_get_unsolicited_buffer
9388 9388 * Locates a free unsolicited buffer.
9389 9389 *
9390 9390 * Input:
9391 9391 * ha = adapter state pointer.
9392 9392 * type = buffer type.
9393 9393 *
9394 9394 * Returns:
9395 9395 * Unsolicited buffer pointer.
9396 9396 *
9397 9397 * Context:
9398 9398 * Interrupt or Kernel context, no mailbox commands allowed.
9399 9399 */
9400 9400 fc_unsol_buf_t *
9401 9401 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
9402 9402 {
9403 9403 fc_unsol_buf_t *ubp;
9404 9404 ql_srb_t *sp;
9405 9405 uint16_t index;
9406 9406
9407 9407 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9408 9408
9409 9409 /* Locate a buffer to use. */
9410 9410 ubp = NULL;
9411 9411
9412 9412 QL_UB_LOCK(ha);
9413 9413 for (index = 0; index < QL_UB_LIMIT; index++) {
9414 9414 ubp = ha->ub_array[index];
9415 9415 if (ubp != NULL) {
9416 9416 sp = ubp->ub_fca_private;
9417 9417 if ((sp->ub_type == type) &&
9418 9418 (sp->flags & SRB_UB_IN_FCA) &&
9419 9419 (!(sp->flags & (SRB_UB_CALLBACK |
9420 9420 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9421 9421 sp->flags |= SRB_UB_ACQUIRED;
9422 9422 ubp->ub_resp_flags = 0;
9423 9423 break;
9424 9424 }
9425 9425 ubp = NULL;
9426 9426 }
9427 9427 }
9428 9428 QL_UB_UNLOCK(ha);
9429 9429
9430 9430 if (ubp) {
9431 9431 ubp->ub_resp_token = NULL;
9432 9432 ubp->ub_class = FC_TRAN_CLASS3;
9433 9433 }
9434 9434
9435 9435 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9436 9436
9437 9437 return (ubp);
9438 9438 }
9439 9439
/*
 * ql_ub_frame_hdr
 *	Processes received unsolicited buffers from ISP.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	index:	unsolicited buffer array index.
 *	done_q:	done queue pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
    ql_head_t *done_q)
{
	fc_unsol_buf_t	*ubp;
	ql_srb_t	*sp;
	uint16_t	loop_id;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_UB_LOCK(ha);
	/* Validate the index before touching the buffer array. */
	if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
		EL(ha, "Invalid buffer index=%xh\n", index);
		QL_UB_UNLOCK(ha);
		return (rval);
	}

	sp = ubp->ub_fca_private;
	/* Buffer being torn down: return it to the FCA and fail. */
	if (sp->flags & SRB_UB_FREE_REQUESTED) {
		EL(ha, "buffer freed index=%xh\n", index);
		sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
		    SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);

		sp->flags |= SRB_UB_IN_FCA;

		QL_UB_UNLOCK(ha);
		return (rval);
	}

	/* Only process an IP buffer owned by the ISP and not in use. */
	if ((sp->handle == index) &&
	    (sp->flags & SRB_UB_IN_ISP) &&
	    (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
	    (!(sp->flags & SRB_UB_ACQUIRED))) {
		/* set broadcast D_ID */
		loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
		if (tq->ub_loop_id == loop_id) {
			if (ha->topology & QL_FL_PORT) {
				ubp->ub_frame.d_id = 0x000000;
			} else {
				ubp->ub_frame.d_id = 0xffffff;
			}
		} else {
			ubp->ub_frame.d_id = ha->d_id.b24;
		}
		/* Build the FC frame header for the IP sequence. */
		ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
		ubp->ub_frame.rsvd = 0;
		ubp->ub_frame.s_id = tq->d_id.b24;
		ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
		ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
		ubp->ub_frame.df_ctl = 0;
		ubp->ub_frame.seq_id = tq->ub_seq_id;
		ubp->ub_frame.rx_id = 0xffff;
		ubp->ub_frame.ox_id = 0xffff;
		/* Clamp this frame's size to what remains in the sequence. */
		ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
		    sp->ub_size : tq->ub_sequence_length;
		ubp->ub_frame.ro = tq->ub_frame_ro;

		/* Advance per-target sequence reassembly state. */
		tq->ub_sequence_length = (uint16_t)
		    (tq->ub_sequence_length - ubp->ub_bufsize);
		tq->ub_frame_ro += ubp->ub_bufsize;
		tq->ub_seq_cnt++;

		/* Set FIRST_SEQ/END_SEQ flags per position in sequence. */
		if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
			if (tq->ub_seq_cnt == 1) {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
			} else {
				ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
				    F_CTL_END_SEQ;
			}
			tq->ub_total_seg_cnt = 0;
		} else if (tq->ub_seq_cnt == 1) {
			ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
			    F_CTL_FIRST_SEQ;
			ubp->ub_frame.df_ctl = 0x20;
		}

		QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
		    ha->instance, ubp->ub_frame.d_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
		    ha->instance, ubp->ub_frame.s_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
		    ha->instance, ubp->ub_frame.seq_cnt);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
		    ha->instance, ubp->ub_frame.seq_id);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
		    ha->instance, ubp->ub_frame.ro);
		QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
		    ha->instance, ubp->ub_frame.f_ctl);
		QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
		    ha->instance, ubp->ub_bufsize);
		QL_DUMP_3(ubp->ub_buffer, 8,
		    ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);

		sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
		ql_add_link_b(done_q, &sp->cmd);
		rval = QL_SUCCESS;
	} else {
		/* Diagnose exactly which precondition failed. */
		if (sp->handle != index) {
			EL(ha, "Bad index=%xh, expect=%xh\n", index,
			    sp->handle);
		}
		if ((sp->flags & SRB_UB_IN_ISP) == 0) {
			EL(ha, "buffer was already in driver, index=%xh\n",
			    index);
		}
		if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
			EL(ha, "buffer was not an IP buffer, index=%xh\n",
			    index);
		}
		if (sp->flags & SRB_UB_ACQUIRED) {
			EL(ha, "buffer was being used by driver, index=%xh\n",
			    index);
		}
	}
	QL_UB_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
9579 9579
9580 9580 /*
9581 9581 * ql_timer
9582 9582 * One second timer function.
9583 9583 *
9584 9584 * Input:
9585 9585 * ql_hba.first = first link in adapter list.
9586 9586 *
9587 9587 * Context:
9588 9588 * Interrupt context, no mailbox commands allowed.
9589 9589 */
9590 9590 static void
9591 9591 ql_timer(void *arg)
9592 9592 {
9593 9593 ql_link_t *link;
9594 9594 uint32_t set_flags;
9595 9595 uint32_t reset_flags;
9596 9596 ql_adapter_state_t *ha = NULL, *vha;
9597 9597
9598 9598 QL_PRINT_6(CE_CONT, "started\n");
9599 9599
9600 9600 /* Acquire global state lock. */
9601 9601 GLOBAL_STATE_LOCK();
9602 9602 if (ql_timer_timeout_id == NULL) {
9603 9603 /* Release global state lock. */
9604 9604 GLOBAL_STATE_UNLOCK();
9605 9605 return;
9606 9606 }
9607 9607
9608 9608 for (link = ql_hba.first; link != NULL; link = link->next) {
9609 9609 ha = link->base_address;
9610 9610
9611 9611 /* Skip adapter if suspended of stalled. */
9612 9612 ADAPTER_STATE_LOCK(ha);
9613 9613 if (ha->flags & ADAPTER_SUSPENDED ||
9614 9614 ha->task_daemon_flags & DRIVER_STALL) {
9615 9615 ADAPTER_STATE_UNLOCK(ha);
9616 9616 continue;
9617 9617 }
9618 9618 ha->flags |= ADAPTER_TIMER_BUSY;
9619 9619 ADAPTER_STATE_UNLOCK(ha);
9620 9620
9621 9621 QL_PM_LOCK(ha);
9622 9622 if (ha->power_level != PM_LEVEL_D0) {
9623 9623 QL_PM_UNLOCK(ha);
9624 9624
9625 9625 ADAPTER_STATE_LOCK(ha);
9626 9626 ha->flags &= ~ADAPTER_TIMER_BUSY;
9627 9627 ADAPTER_STATE_UNLOCK(ha);
9628 9628 continue;
9629 9629 }
9630 9630 ha->busy++;
9631 9631 QL_PM_UNLOCK(ha);
9632 9632
9633 9633 set_flags = 0;
9634 9634 reset_flags = 0;
9635 9635
9636 9636 /* Port retry timer handler. */
9637 9637 if (LOOP_READY(ha)) {
9638 9638 ADAPTER_STATE_LOCK(ha);
9639 9639 if (ha->port_retry_timer != 0) {
9640 9640 ha->port_retry_timer--;
9641 9641 if (ha->port_retry_timer == 0) {
9642 9642 set_flags |= PORT_RETRY_NEEDED;
9643 9643 }
9644 9644 }
9645 9645 ADAPTER_STATE_UNLOCK(ha);
9646 9646 }
9647 9647
9648 9648 /* Loop down timer handler. */
9649 9649 if (LOOP_RECONFIGURE(ha) == 0) {
9650 9650 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9651 9651 ha->loop_down_timer--;
9652 9652 /*
9653 9653 * give the firmware loop down dump flag
9654 9654 * a chance to work.
9655 9655 */
9656 9656 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9657 9657 if (CFG_IST(ha,
9658 9658 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9659 9659 (void) ql_binary_fw_dump(ha,
9660 9660 TRUE);
9661 9661 }
9662 9662 EL(ha, "loop_down_reset, "
9663 9663 "isp_abort_needed\n");
9664 9664 set_flags |= ISP_ABORT_NEEDED;
9665 9665 }
9666 9666 }
9667 9667 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9668 9668 /* Command abort time handler. */
9669 9669 if (ha->loop_down_timer ==
9670 9670 ha->loop_down_abort_time) {
9671 9671 ADAPTER_STATE_LOCK(ha);
9672 9672 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9673 9673 ADAPTER_STATE_UNLOCK(ha);
9674 9674 set_flags |= ABORT_QUEUES_NEEDED;
9675 9675 EL(ha, "loop_down_abort_time, "
9676 9676 "abort_queues_needed\n");
9677 9677 }
9678 9678
9679 9679 /* Watchdog timer handler. */
9680 9680 if (ha->watchdog_timer == 0) {
9681 9681 ha->watchdog_timer = WATCHDOG_TIME;
9682 9682 } else if (LOOP_READY(ha)) {
9683 9683 ha->watchdog_timer--;
9684 9684 if (ha->watchdog_timer == 0) {
9685 9685 for (vha = ha; vha != NULL;
9686 9686 vha = vha->vp_next) {
9687 9687 ql_watchdog(vha,
9688 9688 &set_flags,
9689 9689 &reset_flags);
9690 9690 }
9691 9691 ha->watchdog_timer =
9692 9692 WATCHDOG_TIME;
9693 9693 }
9694 9694 }
9695 9695 }
9696 9696 }
9697 9697
9698 9698 /* Idle timer handler. */
9699 9699 if (!DRIVER_SUSPENDED(ha)) {
9700 9700 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9701 9701 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9702 9702 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9703 9703 #endif
9704 9704 ha->idle_timer = 0;
9705 9705 }
9706 9706 if (ha->send_plogi_timer != NULL) {
9707 9707 ha->send_plogi_timer--;
9708 9708 if (ha->send_plogi_timer == NULL) {
9709 9709 set_flags |= SEND_PLOGI;
9710 9710 }
9711 9711 }
9712 9712 }
9713 9713 ADAPTER_STATE_LOCK(ha);
9714 9714 if (ha->idc_restart_timer != 0) {
9715 9715 ha->idc_restart_timer--;
9716 9716 if (ha->idc_restart_timer == 0) {
9717 9717 ha->idc_restart_cnt = 0;
9718 9718 reset_flags |= DRIVER_STALL;
9719 9719 }
9720 9720 }
9721 9721 if (ha->idc_flash_acc_timer != 0) {
9722 9722 ha->idc_flash_acc_timer--;
9723 9723 if (ha->idc_flash_acc_timer == 0 &&
9724 9724 ha->idc_flash_acc != 0) {
9725 9725 ha->idc_flash_acc = 1;
9726 9726 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9727 9727 ha->idc_mb[1] = 0;
9728 9728 ha->idc_mb[2] = IDC_OPC_DRV_START;
9729 9729 set_flags |= IDC_EVENT;
9730 9730 }
9731 9731 }
9732 9732 ADAPTER_STATE_UNLOCK(ha);
9733 9733
9734 9734 if (set_flags != 0 || reset_flags != 0) {
9735 9735 ql_awaken_task_daemon(ha, NULL, set_flags,
9736 9736 reset_flags);
9737 9737 }
9738 9738
9739 9739 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9740 9740 ql_blink_led(ha);
9741 9741 }
9742 9742
9743 9743 /* Update the IO stats */
9744 9744 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9745 9745 ha->xioctl->IOInputMByteCnt +=
9746 9746 (ha->xioctl->IOInputByteCnt / 0x100000);
9747 9747 ha->xioctl->IOInputByteCnt %= 0x100000;
9748 9748 }
9749 9749
9750 9750 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9751 9751 ha->xioctl->IOOutputMByteCnt +=
9752 9752 (ha->xioctl->IOOutputByteCnt / 0x100000);
9753 9753 ha->xioctl->IOOutputByteCnt %= 0x100000;
9754 9754 }
9755 9755
9756 9756 if (CFG_IST(ha, CFG_CTRL_8021)) {
9757 9757 (void) ql_8021_idc_handler(ha);
9758 9758 }
9759 9759
9760 9760 ADAPTER_STATE_LOCK(ha);
9761 9761 ha->flags &= ~ADAPTER_TIMER_BUSY;
9762 9762 ADAPTER_STATE_UNLOCK(ha);
9763 9763
9764 9764 QL_PM_LOCK(ha);
9765 9765 ha->busy--;
9766 9766 QL_PM_UNLOCK(ha);
9767 9767 }
9768 9768
9769 9769 /* Restart timer, if not being stopped. */
9770 9770 if (ql_timer_timeout_id != NULL) {
9771 9771 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9772 9772 }
9773 9773
9774 9774 /* Release global state lock. */
9775 9775 GLOBAL_STATE_UNLOCK();
9776 9776
9777 9777 QL_PRINT_6(CE_CONT, "done\n");
9778 9778 }
9779 9779
9780 9780 /*
9781 9781 * ql_timeout_insert
9782 9782 * Function used to insert a command block onto the
9783 9783 * watchdog timer queue.
9784 9784 *
9785 9785 * Note: Must insure that pkt_time is not zero
9786 9786 * before calling ql_timeout_insert.
9787 9787 *
9788 9788 * Input:
9789 9789 * ha: adapter state pointer.
9790 9790 * tq: target queue pointer.
9791 9791 * sp: SRB pointer.
9792 9792 * DEVICE_QUEUE_LOCK must be already obtained.
9793 9793 *
9794 9794 * Context:
9795 9795 * Kernel context.
9796 9796 */
9797 9797 /* ARGSUSED */
9798 9798 static void
9799 9799 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9800 9800 {
9801 9801 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9802 9802
9803 9803 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9804 9804 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9805 9805 /*
9806 9806 * The WATCHDOG_TIME must be rounded up + 1. As an example,
9807 9807 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9808 9808 * will expire in the next watchdog call, which could be in
9809 9809 * 1 microsecond.
9810 9810 *
9811 9811 */
9812 9812 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9813 9813 WATCHDOG_TIME;
9814 9814 /*
9815 9815 * Added an additional 10 to account for the
9816 9816 * firmware timer drift which can occur with
9817 9817 * very long timeout values.
9818 9818 */
9819 9819 sp->wdg_q_time += 10;
9820 9820
9821 9821 /*
9822 9822 * Add 6 more to insure watchdog does not timeout at the same
9823 9823 * time as ISP RISC code timeout.
9824 9824 */
9825 9825 sp->wdg_q_time += 6;
9826 9826
9827 9827 /* Save initial time for resetting watchdog time. */
9828 9828 sp->init_wdg_q_time = sp->wdg_q_time;
9829 9829
9830 9830 /* Insert command onto watchdog queue. */
9831 9831 ql_add_link_b(&tq->wdg, &sp->wdg);
9832 9832
9833 9833 sp->flags |= SRB_WATCHDOG_ENABLED;
9834 9834 } else {
9835 9835 sp->isp_timeout = 0;
9836 9836 sp->wdg_q_time = 0;
9837 9837 sp->init_wdg_q_time = 0;
9838 9838 }
9839 9839
9840 9840 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9841 9841 }
9842 9842
/*
 * ql_watchdog
 *	Timeout handler that runs in interrupt context. The
 *	ql_adapter_state_t * argument is the parameter set up when the
 *	timeout was initialized (state structure pointer).
 *	Function used to update timeout values and if timeout
 *	has occurred command will be aborted.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt context, no mailbox commands allowed.
 */
static void
ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	ql_link_t	*link;
	ql_link_t	*next_cmd;
	ql_link_t	*next_device;
	ql_tgt_t	*tq;
	ql_lun_t	*lq;
	uint16_t	index;
	int		q_sane;

	QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);

	/* Loop through all targets. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = next_device) {
			tq = link->base_address;

			/*
			 * Try to acquire device queue lock; never block in
			 * interrupt context. On contention, give up on the
			 * rest of this list head until the next timer pop
			 * (next_device = NULL ends the inner loop).
			 */
			if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
				next_device = NULL;
				continue;
			}

			next_device = link->next;

			/*
			 * Without link-down reporting, a device whose port
			 * retries are exhausted is no longer watched.
			 */
			if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
			    (tq->port_down_retry_count == 0)) {
				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/* Find out if this device is in a sane state. */
			if (tq->flags & (TQF_RSCN_RCVD |
			    TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
				q_sane = 0;
			} else {
				q_sane = 1;
			}
			/* Loop through commands on watchdog queue. */
			for (link = tq->wdg.first; link != NULL;
			    link = next_cmd) {
				next_cmd = link->next;
				sp = link->base_address;
				lq = sp->lun_queue;

				/*
				 * For SCSI commands, if everything seems to
				 * be going fine and this packet is stuck
				 * because of throttling at LUN or target
				 * level then do not decrement the
				 * sp->wdg_q_time
				 */
				if (ha->task_daemon_flags & STATE_ONLINE &&
				    (sp->flags & SRB_ISP_STARTED) == 0 &&
				    q_sane && sp->flags & SRB_FCP_CMD_PKT &&
				    lq->lun_outcnt >= ha->execution_throttle) {
					continue;
				}

				if (sp->wdg_q_time != 0) {
					sp->wdg_q_time--;

					/* Timeout? */
					if (sp->wdg_q_time != 0) {
						continue;
					}

					/* Expired: take it off the queue. */
					ql_remove_link(&tq->wdg, &sp->wdg);
					sp->flags &= ~SRB_WATCHDOG_ENABLED;

					if (sp->flags & SRB_ISP_STARTED) {
						/*
						 * The command reached the ISP;
						 * handling it will request an
						 * ISP abort, so abandon the
						 * entire scan: drop the lock,
						 * clear the cursors and push
						 * index past the list bound.
						 * tq = NULL marks the lock as
						 * already released below.
						 */
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);

						DEVICE_QUEUE_UNLOCK(tq);
						tq = NULL;
						next_cmd = NULL;
						next_device = NULL;
						index = DEVICE_HEAD_LIST_SIZE;
					} else {
						ql_cmd_timeout(ha, tq, sp,
						    set_flags, reset_flags);
					}
				}
			}

			/* Release device queue lock, unless dropped above. */
			if (tq != NULL) {
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
}
9958 9958
/*
 * ql_cmd_timeout
 *	Command timeout handler. Handles three cases: the command never
 *	reached the ISP (complete it locally), an 8021-type controller
 *	(reclaim the handle and try a firmware abort), or the default
 *	case (flag the command and request a full ISP abort).
 *
 * Input:
 *	ha:		adapter state pointer.
 *	tq:		target queue pointer.
 *	sp:		SRB pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt context, no mailbox commands allowed.
 *	Called with DEVICE_QUEUE_LOCK held; the lock is dropped around
 *	completion processing and re-acquired before returning.
 */
/* ARGSUSED */
static void
ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(sp->flags & SRB_ISP_STARTED)) {
		/* Case 1: the command timed out before reaching the ISP. */

		EL(ha, "command timed out in driver = %ph\n", (void *)sp);

		REQUEST_RING_LOCK(ha);

		/* if it's on a queue */
		if (sp->cmd.head) {
			/*
			 * The pending_cmds que needs to be
			 * protected by the ring lock
			 */
			ql_remove_link(sp->cmd.head, &sp->cmd);
		}
		sp->flags &= ~SRB_IN_DEVICE_QUEUE;

		/* Release request ring lock and device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);
	} else if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* Case 2: 8021 controller — reclaim handle, abort in fw. */
		int		rval;
		uint32_t	index;

		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		if (sp->handle) {
			ha->pha->timeout_cnt++;
			index = sp->handle & OSC_INDEX_MASK;
			if (ha->pha->outstanding_cmds[index] == sp) {
				/*
				 * Invalidate the request entry so the ISP
				 * will not process it, then free the slot.
				 */
				sp->request_ring_ptr->entry_type =
				    INVALID_ENTRY_TYPE;
				sp->request_ring_ptr->entry_count = 0;
				ha->pha->outstanding_cmds[index] = 0;
			}
			INTR_UNLOCK(ha);

			rval = ql_abort_command(ha, sp);
			/*
			 * Escalate to a full ISP abort if the firmware
			 * abort misbehaved or too many timeouts piled up.
			 */
			if (rval == QL_FUNCTION_TIMEOUT ||
			    rval == QL_LOCK_TIMEOUT ||
			    rval == QL_FUNCTION_PARAMETER_ERROR ||
			    ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
				*set_flags |= ISP_ABORT_NEEDED;
				EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
				    "needed\n", rval, ha->pha->timeout_cnt);
			}

			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		} else {
			INTR_UNLOCK(ha);
		}

		/* Set timeout status */
		sp->pkt->pkt_reason = CS_TIMEOUT;

		/* Ensure no retry */
		sp->flags &= ~SRB_RETRY;

		/* Call done routine to handle completion. */
		ql_done(&sp->cmd);

		DEVICE_QUEUE_LOCK(tq);

	} else {
		/* Case 3: default — the ISP itself must be reset. */
		EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
		    "spf=%xh, isp_abort_needed\n", (void *)sp,
		    (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
		    sp->handle & OSC_INDEX_MASK, sp->flags);

		/* Release device queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);

		INTR_LOCK(ha);
		ha->pha->xioctl->ControllerErrorCount++;
		INTR_UNLOCK(ha);

		/* Set ISP needs to be reset */
		sp->flags |= SRB_COMMAND_TIMEOUT;

		if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
			(void) ql_binary_fw_dump(ha, TRUE);
		}

		*set_flags |= ISP_ABORT_NEEDED;

		DEVICE_QUEUE_LOCK(tq);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10088 10088
/*
 * ql_rst_aen
 *	Processes asynchronous reset: issues a marker IOCB to
 *	resynchronize command processing with the firmware.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_rst_aen(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Issue marker command (all targets/LUNs). */
	(void) ql_marker(ha, 0, 0, MK_SYNC_ALL);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10109 10109
10110 10110 /*
10111 10111 * ql_cmd_wait
10112 10112 * Stall driver until all outstanding commands are returned.
10113 10113 *
10114 10114 * Input:
10115 10115 * ha = adapter state pointer.
10116 10116 *
10117 10117 * Context:
10118 10118 * Kernel context.
10119 10119 */
10120 10120 void
10121 10121 ql_cmd_wait(ql_adapter_state_t *ha)
10122 10122 {
10123 10123 uint16_t index;
10124 10124 ql_link_t *link;
10125 10125 ql_tgt_t *tq;
10126 10126 ql_adapter_state_t *vha;
10127 10127
10128 10128 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10129 10129
10130 10130 /* Wait for all outstanding commands to be returned. */
10131 10131 (void) ql_wait_outstanding(ha);
10132 10132
10133 10133 /*
10134 10134 * clear out internally queued commands
10135 10135 */
10136 10136 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10137 10137 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10138 10138 for (link = vha->dev[index].first; link != NULL;
10139 10139 link = link->next) {
10140 10140 tq = link->base_address;
10141 10141 if (tq &&
10142 10142 (!(tq->prli_svc_param_word_3 &
10143 10143 PRLI_W3_RETRY))) {
10144 10144 (void) ql_abort_device(vha, tq, 0);
10145 10145 }
10146 10146 }
10147 10147 }
10148 10148 }
10149 10149
10150 10150 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10151 10151 }
10152 10152
/*
 * ql_wait_outstanding
 *	Wait for all outstanding commands to complete, restarting the
 *	scan whenever progress is made and giving up after a bounded
 *	number of delays (ql_osc_wait_count).
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	index - the index for ql_srb into outstanding_cmds
 *	(MAX_OUTSTANDING_COMMANDS when the array drained; less if a
 *	command was still outstanding when patience ran out).
 *
 * Context:
 *	Kernel context.
 */
static uint16_t
ql_wait_outstanding(ql_adapter_state_t *ha)
{
	ql_srb_t	*sp;
	uint16_t	index, count;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	count = ql_osc_wait_count;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Push any pending commands out before checking slots. */
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
			/* Restart the scan from the first slot. */
			index = 1;
		}
		if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
		    (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
			if (count-- != 0) {
				/*
				 * 10000 is presumably microseconds —
				 * confirm against ql_delay.  index = 0 so
				 * the loop's ++ restarts the scan at 1.
				 */
				ql_delay(ha, 10000);
				index = 0;
			} else {
				/* Patience exhausted; report the holdout. */
				EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
				    (void *)sp, index, sp->handle);
				break;
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (index);
}
10197 10197
/*
 * ql_restart_queues
 *	Restart device queues: clears the suspended flag on every
 *	target of every virtual port and kicks off queued commands.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_restart_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link, *link2;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Walk all vports starting from the physical adapter. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);

				tq->flags &= ~TQF_QUEUE_SUSPENDED;

				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;

					if (lq->cmd.first != NULL) {
						/*
						 * NOTE(review): ql_next()
						 * appears to release the
						 * device queue lock, hence
						 * the re-lock — confirm.
						 */
						ql_next(vha, lq);
						DEVICE_QUEUE_LOCK(tq);
					}
				}

				/* Release device queue lock. */
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10249 10249
/*
 * ql_iidma
 *	Setup iiDMA parameters to firmware for each target flagged
 *	TQF_IIDMA_NEEDED, reading the persistent per-port rate
 *	property on first use.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_iidma(ql_adapter_state_t *ha)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	char		buf[256];
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* iiDMA only applies to 24xx/25xx/81xx class controllers. */
	if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			tq->flags &= ~TQF_IIDMA_NEEDED;

			/* Skip fabric-service handles and undefined rates. */
			if ((tq->loop_id > LAST_N_PORT_HDL) ||
			    (tq->iidma_rate == IIDMA_RATE_NDEF)) {
				DEVICE_QUEUE_UNLOCK(tq);
				continue;
			}

			/*
			 * Get the iiDMA persistent data, keyed by the
			 * remote port WWN ("iidma-rate-<portwwn>").
			 */
			if (tq->iidma_rate == IIDMA_RATE_INIT) {
				(void) sprintf(buf,
				    "iidma-rate-%02x%02x%02x%02x%02x"
				    "%02x%02x%02x", tq->port_name[0],
				    tq->port_name[1], tq->port_name[2],
				    tq->port_name[3], tq->port_name[4],
				    tq->port_name[5], tq->port_name[6],
				    tq->port_name[7]);

				if ((data = ql_get_prop(ha, buf)) ==
				    0xffffffff) {
					/* Property absent: no defined rate. */
					tq->iidma_rate = IIDMA_RATE_NDEF;
				} else {
					switch (data) {
					case IIDMA_RATE_1GB:
					case IIDMA_RATE_2GB:
					case IIDMA_RATE_4GB:
					case IIDMA_RATE_10GB:
						tq->iidma_rate = data;
						break;
					case IIDMA_RATE_8GB:
						/* 8Gb only on 25xx HBAs. */
						if (CFG_IST(ha,
						    CFG_CTRL_25XX)) {
							tq->iidma_rate = data;
						} else {
							tq->iidma_rate =
							    IIDMA_RATE_4GB;
						}
						break;
					default:
						EL(ha, "invalid data for "
						    "parameter: %s: %xh\n",
						    buf, data);
						tq->iidma_rate =
						    IIDMA_RATE_NDEF;
						break;
					}
				}
			}

			/* Set the firmware's iiDMA rate */
			if (tq->iidma_rate <= IIDMA_RATE_MAX &&
			    !(CFG_IST(ha, CFG_CTRL_8081))) {
				data = ql_iidma_rate(ha, tq->loop_id,
				    &tq->iidma_rate, EXT_IIDMA_MODE_SET);
				if (data != QL_SUCCESS) {
					EL(ha, "mbx failed: %xh\n", data);
				}
			}

			/* Release device queue lock. */
			DEVICE_QUEUE_UNLOCK(tq);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10356 10356
/*
 * ql_abort_queues
 *	Abort all commands on device queues: first returns everything
 *	in the outstanding command array, then flushes each device's
 *	internal queues. Devices capable of FCP-2 retry are skipped.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_queues(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_tgt_t		*tq;
	ql_srb_t		*sp;
	uint16_t		index;
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	/* Return all commands in outstanding command list. */
	INTR_LOCK(ha);

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		/* Flush pending commands first; restarts the scan. */
		if (ha->pending_cmds.first != NULL) {
			INTR_UNLOCK(ha);
			ql_start_iocb(ha, NULL);
			/* Delay for system */
			ql_delay(ha, 10000);
			INTR_LOCK(ha);
			index = 1;
		}
		sp = ha->outstanding_cmds[index];

		/* skip devices capable of FCP2 retrys */
		if ((sp != NULL) &&
		    ((tq = sp->lun_queue->target_queue) != NULL) &&
		    (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
			/* Reclaim the slot and handle. */
			ha->outstanding_cmds[index] = NULL;
			sp->handle = 0;
			sp->flags &= ~SRB_IN_TOKEN_ARRAY;

			/* INTR lock dropped across completion callback. */
			INTR_UNLOCK(ha);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completions. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);

			INTR_LOCK(ha);
		}
	}
	INTR_UNLOCK(ha);

	/* Now flush the per-device queues on every virtual port. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
		    vha->instance, vha->vp_index);
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = vha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				/* skip devices capable of FCP2 retrys */
				if (!(tq->prli_svc_param_word_3 &
				    PRLI_W3_RETRY)) {
					/*
					 * Set port unavailable status and
					 * return all commands on a devices
					 * queues.
					 */
					ql_abort_device_queues(ha, tq);
				}
			}
		}
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10438 10438
/*
 * ql_abort_device_queues
 *	Abort all commands on a single device's LUN queues, completing
 *	each with CS_PORT_UNAVAILABLE status.
 *
 * Input:
 *	ha = adapter state pointer.
 *	tq = target queue pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	ql_link_t	*lun_link, *cmd_link;
	ql_srb_t	*sp;
	ql_lun_t	*lq;

	QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);

	DEVICE_QUEUE_LOCK(tq);

	for (lun_link = tq->lun_queues.first; lun_link != NULL;
	    lun_link = lun_link->next) {
		lq = lun_link->base_address;

		cmd_link = lq->cmd.first;
		while (cmd_link != NULL) {
			sp = cmd_link->base_address;

			/* Commands already being aborted are left alone. */
			if (sp->flags & SRB_ABORT) {
				cmd_link = cmd_link->next;
				continue;
			}

			/* Remove srb from device cmd queue. */
			ql_remove_link(&lq->cmd, &sp->cmd);

			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Lock dropped across the completion callback. */
			DEVICE_QUEUE_UNLOCK(tq);

			/* Set ending status. */
			sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);

			/* Delay for system */
			ql_delay(ha, 10000);

			/*
			 * The list may have changed while unlocked;
			 * restart from the head of this LUN's queue.
			 */
			DEVICE_QUEUE_LOCK(tq);
			cmd_link = lq->cmd.first;
		}
	}
	DEVICE_QUEUE_UNLOCK(tq);

	QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
}
10497 10497
10498 10498 /*
10499 10499 * ql_loop_resync
10500 10500 * Resync with fibre channel devices.
10501 10501 *
10502 10502 * Input:
10503 10503 * ha = adapter state pointer.
10504 10504 * DEVICE_QUEUE_LOCK must be released.
10505 10505 *
10506 10506 * Returns:
10507 10507 * ql local function return status code.
10508 10508 *
10509 10509 * Context:
10510 10510 * Kernel context.
10511 10511 */
10512 10512 static int
10513 10513 ql_loop_resync(ql_adapter_state_t *ha)
10514 10514 {
10515 10515 int rval;
10516 10516
10517 10517 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10518 10518
10519 10519 if (ha->flags & IP_INITIALIZED) {
10520 10520 (void) ql_shutdown_ip(ha);
10521 10521 }
10522 10522
10523 10523 rval = ql_fw_ready(ha, 10);
10524 10524
10525 10525 TASK_DAEMON_LOCK(ha);
10526 10526 ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10527 10527 TASK_DAEMON_UNLOCK(ha);
10528 10528
10529 10529 /* Set loop online, if it really is. */
10530 10530 if (rval == QL_SUCCESS) {
10531 10531 ql_loop_online(ha);
10532 10532 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10533 10533 } else {
10534 10534 EL(ha, "failed, rval = %xh\n", rval);
10535 10535 }
10536 10536
10537 10537 return (rval);
10538 10538 }
10539 10539
/*
 * ql_loop_online
 *	Set loop online status if it really is online: updates each
 *	virtual port's FC state, restarts IP if it was shut down, and
 *	restarts any stopped device queues.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Context:
 *	Kernel context.
 */
void
ql_loop_online(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Inform the FC Transport that the hardware is online. */
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (!(vha->task_daemon_flags &
		    (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
			/* Restart IP if it was shutdown. */
			if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
			    !(vha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(vha);
				ql_isp_rcvbuf(vha);
			}

			/*
			 * Only transition ports that are not already in
			 * the LOOP or ONLINE state; keep the speed bits
			 * and set the topology-appropriate state bit.
			 */
			if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
			    FC_PORT_STATE_MASK(vha->state) !=
			    FC_STATE_ONLINE) {
				vha->state = FC_PORT_SPEED_MASK(vha->state);
				if (vha->topology & QL_LOOP_CONNECTION) {
					vha->state |= FC_STATE_LOOP;
				} else {
					vha->state |= FC_STATE_ONLINE;
				}
				/*
				 * NOTE(review): lock taken on ha while the
				 * flag is set on vha — presumably the task
				 * daemon lock covers all vports; confirm.
				 */
				TASK_DAEMON_LOCK(ha);
				vha->task_daemon_flags |= FC_STATE_CHANGE;
				TASK_DAEMON_UNLOCK(ha);
			}
		}
	}

	ql_awaken_task_daemon(ha, NULL, 0, 0);

	/* Restart device queues that may have been stopped. */
	ql_restart_queues(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
10592 10592
/*
 * ql_fca_handle_to_state
 *	Verifies handle to be correct. The handle is validated against
 *	the global adapter list only in QL_DEBUG_ROUTINES builds; in
 *	all builds the handle is returned cast to a state pointer.
 *
 * Input:
 *	fca_handle = pointer to state structure.
 *
 * Returns:
 *	NULL = failure
 *
 * Context:
 *	Kernel context.
 */
static ql_adapter_state_t *
ql_fca_handle_to_state(opaque_t fca_handle)
{
#ifdef	QL_DEBUG_ROUTINES
	ql_link_t		*link;
	ql_adapter_state_t	*ha = NULL;
	ql_adapter_state_t	*vha = NULL;

	/* Search every adapter and each of its virtual ports. */
	for (link = ql_hba.first; link != NULL; link = link->next) {
		ha = link->base_address;
		for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
			if ((opaque_t)vha == fca_handle) {
				ha = vha;
				break;
			}
		}
		if ((opaque_t)ha == fca_handle) {
			break;
		} else {
			ha = NULL;
		}
	}

	if (ha == NULL) {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "failed\n");
	}

#endif /* QL_DEBUG_ROUTINES */

	return ((ql_adapter_state_t *)fca_handle);
}
10638 10638
10639 10639 /*
10640 10640 * ql_d_id_to_queue
10641 10641 * Locate device queue that matches destination ID.
10642 10642 *
10643 10643 * Input:
10644 10644 * ha = adapter state pointer.
10645 10645 * d_id = destination ID
10646 10646 *
10647 10647 * Returns:
10648 10648 * NULL = failure
10649 10649 *
10650 10650 * Context:
10651 10651 * Interrupt or Kernel context, no mailbox commands allowed.
10652 10652 */
10653 10653 ql_tgt_t *
10654 10654 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10655 10655 {
10656 10656 uint16_t index;
10657 10657 ql_tgt_t *tq;
10658 10658 ql_link_t *link;
10659 10659
10660 10660 /* Get head queue index. */
10661 10661 index = ql_alpa_to_index[d_id.b.al_pa];
10662 10662
10663 10663 for (link = ha->dev[index].first; link != NULL; link = link->next) {
10664 10664 tq = link->base_address;
10665 10665 if (tq->d_id.b24 == d_id.b24 &&
10666 10666 VALID_DEVICE_ID(ha, tq->loop_id)) {
10667 10667 return (tq);
10668 10668 }
10669 10669 }
10670 10670
10671 10671 return (NULL);
10672 10672 }
10673 10673
10674 10674 /*
10675 10675 * ql_loop_id_to_queue
10676 10676 * Locate device queue that matches loop ID.
10677 10677 *
10678 10678 * Input:
10679 10679 * ha: adapter state pointer.
10680 10680 * loop_id: destination ID
10681 10681 *
10682 10682 * Returns:
10683 10683 * NULL = failure
10684 10684 *
10685 10685 * Context:
10686 10686 * Interrupt or Kernel context, no mailbox commands allowed.
10687 10687 */
10688 10688 ql_tgt_t *
10689 10689 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10690 10690 {
10691 10691 uint16_t index;
10692 10692 ql_tgt_t *tq;
10693 10693 ql_link_t *link;
10694 10694
10695 10695 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10696 10696 for (link = ha->dev[index].first; link != NULL;
10697 10697 link = link->next) {
10698 10698 tq = link->base_address;
10699 10699 if (tq->loop_id == loop_id) {
10700 10700 return (tq);
10701 10701 }
10702 10702 }
10703 10703 }
10704 10704
10705 10705 return (NULL);
10706 10706 }
10707 10707
10708 10708 /*
10709 10709 * ql_kstat_update
10710 10710 * Updates kernel statistics.
10711 10711 *
10712 10712 * Input:
10713 10713 * ksp - driver kernel statistics structure pointer.
10714 10714 * rw - function to perform
10715 10715 *
10716 10716 * Returns:
10717 10717 * 0 or EACCES
10718 10718 *
10719 10719 * Context:
10720 10720 * Kernel context.
10721 10721 */
10722 10722 /* ARGSUSED */
10723 10723 static int
10724 10724 ql_kstat_update(kstat_t *ksp, int rw)
10725 10725 {
10726 10726 int rval;
10727 10727
10728 10728 QL_PRINT_3(CE_CONT, "started\n");
10729 10729
10730 10730 if (rw == KSTAT_WRITE) {
10731 10731 rval = EACCES;
10732 10732 } else {
10733 10733 rval = 0;
10734 10734 }
10735 10735
10736 10736 if (rval != 0) {
10737 10737 /*EMPTY*/
10738 10738 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10739 10739 } else {
10740 10740 /*EMPTY*/
10741 10741 QL_PRINT_3(CE_CONT, "done\n");
10742 10742 }
10743 10743 return (rval);
10744 10744 }
10745 10745
10746 10746 /*
10747 10747 * ql_load_flash
10748 10748 * Loads flash.
10749 10749 *
10750 10750 * Input:
10751 10751 * ha: adapter state pointer.
10752 10752 * dp: data pointer.
10753 10753 * size: data length.
10754 10754 *
10755 10755 * Returns:
10756 10756 * ql local function return status code.
10757 10757 *
10758 10758 * Context:
10759 10759 * Kernel context.
10760 10760 */
10761 10761 int
10762 10762 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10763 10763 {
10764 10764 uint32_t cnt;
10765 10765 int rval;
10766 10766 uint32_t size_to_offset;
10767 10767 uint32_t size_to_compare;
10768 10768 int erase_all;
10769 10769
10770 10770 if (CFG_IST(ha, CFG_CTRL_24258081)) {
10771 10771 return (ql_24xx_load_flash(ha, dp, size, 0));
10772 10772 }
10773 10773
10774 10774 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10775 10775
10776 10776 size_to_compare = 0x20000;
10777 10777 size_to_offset = 0;
10778 10778 erase_all = 0;
10779 10779 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10780 10780 if (size == 0x80000) {
10781 10781 /* Request to flash the entire chip. */
10782 10782 size_to_compare = 0x80000;
10783 10783 erase_all = 1;
10784 10784 } else {
10785 10785 size_to_compare = 0x40000;
10786 10786 if (ql_flash_sbus_fpga) {
10787 10787 size_to_offset = 0x40000;
10788 10788 }
10789 10789 }
10790 10790 }
10791 10791 if (size > size_to_compare) {
10792 10792 rval = QL_FUNCTION_PARAMETER_ERROR;
10793 10793 EL(ha, "failed=%xh\n", rval);
10794 10794 return (rval);
10795 10795 }
10796 10796
10797 10797 GLOBAL_HW_LOCK();
10798 10798
10799 10799 /* Enable Flash Read/Write. */
10800 10800 ql_flash_enable(ha);
10801 10801
10802 10802 /* Erase flash prior to write. */
10803 10803 rval = ql_erase_flash(ha, erase_all);
10804 10804
10805 10805 if (rval == QL_SUCCESS) {
10806 10806 /* Write data to flash. */
10807 10807 for (cnt = 0; cnt < size; cnt++) {
10808 10808 /* Allow other system activity. */
10809 10809 if (cnt % 0x1000 == 0) {
10810 10810 ql_delay(ha, 10000);
10811 10811 }
10812 10812 rval = ql_program_flash_address(ha,
10813 10813 cnt + size_to_offset, *dp++);
10814 10814 if (rval != QL_SUCCESS) {
10815 10815 break;
10816 10816 }
10817 10817 }
10818 10818 }
10819 10819
10820 10820 ql_flash_disable(ha);
10821 10821
10822 10822 GLOBAL_HW_UNLOCK();
10823 10823
10824 10824 if (rval != QL_SUCCESS) {
10825 10825 EL(ha, "failed=%xh\n", rval);
10826 10826 } else {
10827 10827 /*EMPTY*/
10828 10828 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10829 10829 }
10830 10830 return (rval);
10831 10831 }
10832 10832
10833 10833 /*
10834 10834 * ql_program_flash_address
10835 10835 * Program flash address.
10836 10836 *
10837 10837 * Input:
10838 10838 * ha = adapter state pointer.
10839 10839 * addr = flash byte address.
10840 10840 * data = data to be written to flash.
10841 10841 *
10842 10842 * Returns:
10843 10843 * ql local function return status code.
10844 10844 *
10845 10845 * Context:
10846 10846 * Kernel context.
10847 10847 */
10848 10848 static int
10849 10849 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10850 10850 {
10851 10851 int rval;
10852 10852
10853 10853 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10854 10854
10855 10855 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10856 10856 ql_write_flash_byte(ha, 0x5555, 0xa0);
10857 10857 ql_write_flash_byte(ha, addr, data);
10858 10858 } else {
10859 10859 /* Write Program Command Sequence */
10860 10860 ql_write_flash_byte(ha, 0x5555, 0xaa);
10861 10861 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10862 10862 ql_write_flash_byte(ha, 0x5555, 0xa0);
10863 10863 ql_write_flash_byte(ha, addr, data);
10864 10864 }
10865 10865
10866 10866 /* Wait for write to complete. */
10867 10867 rval = ql_poll_flash(ha, addr, data);
10868 10868
10869 10869 if (rval != QL_SUCCESS) {
10870 10870 EL(ha, "failed=%xh\n", rval);
10871 10871 } else {
10872 10872 /*EMPTY*/
10873 10873 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10874 10874 }
10875 10875 return (rval);
10876 10876 }
10877 10877
10878 10878 /*
10879 10879 * ql_erase_flash
10880 10880 * Erases entire flash.
10881 10881 *
10882 10882 * Input:
10883 10883 * ha = adapter state pointer.
10884 10884 *
10885 10885 * Returns:
10886 10886 * ql local function return status code.
10887 10887 *
10888 10888 * Context:
10889 10889 * Kernel context.
10890 10890 */
10891 10891 int
10892 10892 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10893 10893 {
10894 10894 int rval;
10895 10895 uint32_t erase_delay = 2000000;
10896 10896 uint32_t sStartAddr;
10897 10897 uint32_t ssize;
10898 10898 uint32_t cnt;
10899 10899 uint8_t *bfp;
10900 10900 uint8_t *tmp;
10901 10901
10902 10902 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10903 10903
10904 10904 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10905 10905
10906 10906 if (ql_flash_sbus_fpga == 1) {
10907 10907 ssize = QL_SBUS_FCODE_SIZE;
10908 10908 sStartAddr = QL_FCODE_OFFSET;
10909 10909 } else {
10910 10910 ssize = QL_FPGA_SIZE;
10911 10911 sStartAddr = QL_FPGA_OFFSET;
10912 10912 }
10913 10913
10914 10914 erase_delay = 20000000;
10915 10915
10916 10916 bfp = (uint8_t *)kmem_zalloc(ssize, KM_SLEEP);
10917 10917
10918 10918 /* Save the section of flash we're not updating to buffer */
10919 10919 tmp = bfp;
10920 10920 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10921 10921 /* Allow other system activity. */
10922 10922 if (cnt % 0x1000 == 0) {
10923 10923 ql_delay(ha, 10000);
10924 10924 }
10925 10925 *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10926 10926 }
10927 10927 }
10928 10928
10929 10929 /* Chip Erase Command Sequence */
10930 10930 ql_write_flash_byte(ha, 0x5555, 0xaa);
10931 10931 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10932 10932 ql_write_flash_byte(ha, 0x5555, 0x80);
10933 10933 ql_write_flash_byte(ha, 0x5555, 0xaa);
10934 10934 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10935 10935 ql_write_flash_byte(ha, 0x5555, 0x10);
10936 10936
10937 10937 ql_delay(ha, erase_delay);
10938 10938
10939 10939 /* Wait for erase to complete. */
10940 10940 rval = ql_poll_flash(ha, 0, 0x80);
10941 10941
10942 10942 if (rval != QL_SUCCESS) {
10943 10943 EL(ha, "failed=%xh\n", rval);
10944 10944 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10945 10945 kmem_free(bfp, ssize);
10946 10946 }
10947 10947 return (rval);
10948 10948 }
10949 10949
10950 10950 /* restore the section we saved in the buffer */
10951 10951 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10952 10952 /* Restore the section we saved off */
10953 10953 tmp = bfp;
10954 10954 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10955 10955 /* Allow other system activity. */
10956 10956 if (cnt % 0x1000 == 0) {
10957 10957 ql_delay(ha, 10000);
10958 10958 }
10959 10959 rval = ql_program_flash_address(ha, cnt, *tmp++);
10960 10960 if (rval != QL_SUCCESS) {
10961 10961 break;
10962 10962 }
10963 10963 }
10964 10964
10965 10965 kmem_free(bfp, ssize);
10966 10966 }
10967 10967
10968 10968 if (rval != QL_SUCCESS) {
10969 10969 EL(ha, "failed=%xh\n", rval);
10970 10970 } else {
10971 10971 /*EMPTY*/
10972 10972 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10973 10973 }
10974 10974 return (rval);
10975 10975 }
10976 10976
10977 10977 /*
10978 10978 * ql_poll_flash
10979 10979 * Polls flash for completion.
10980 10980 *
10981 10981 * Input:
10982 10982 * ha = adapter state pointer.
10983 10983 * addr = flash byte address.
10984 10984 * data = data to be polled.
10985 10985 *
10986 10986 * Returns:
10987 10987 * ql local function return status code.
10988 10988 *
10989 10989 * Context:
10990 10990 * Kernel context.
10991 10991 */
10992 10992 int
10993 10993 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10994 10994 {
10995 10995 uint8_t flash_data;
10996 10996 uint32_t cnt;
10997 10997 int rval = QL_FUNCTION_FAILED;
10998 10998
10999 10999 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11000 11000
11001 11001 poll_data = (uint8_t)(poll_data & BIT_7);
11002 11002
11003 11003 /* Wait for 30 seconds for command to finish. */
11004 11004 for (cnt = 30000000; cnt; cnt--) {
11005 11005 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
11006 11006
11007 11007 if ((flash_data & BIT_7) == poll_data) {
11008 11008 rval = QL_SUCCESS;
11009 11009 break;
11010 11010 }
11011 11011 if (flash_data & BIT_5 && cnt > 2) {
11012 11012 cnt = 2;
11013 11013 }
11014 11014 drv_usecwait(1);
11015 11015 }
11016 11016
11017 11017 if (rval != QL_SUCCESS) {
11018 11018 EL(ha, "failed=%xh\n", rval);
11019 11019 } else {
11020 11020 /*EMPTY*/
11021 11021 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11022 11022 }
11023 11023 return (rval);
11024 11024 }
11025 11025
11026 11026 /*
11027 11027 * ql_flash_enable
11028 11028 * Setup flash for reading/writing.
11029 11029 *
11030 11030 * Input:
11031 11031 * ha = adapter state pointer.
11032 11032 *
11033 11033 * Context:
11034 11034 * Kernel context.
11035 11035 */
11036 11036 void
11037 11037 ql_flash_enable(ql_adapter_state_t *ha)
11038 11038 {
11039 11039 uint16_t data;
11040 11040
11041 11041 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11042 11042
11043 11043 /* Enable Flash Read/Write. */
11044 11044 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11045 11045 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11046 11046 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11047 11047 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
11048 11048 ddi_put16(ha->sbus_fpga_dev_handle,
11049 11049 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11050 11050 /* Read reset command sequence */
11051 11051 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11052 11052 ql_write_flash_byte(ha, 0x555, 0x55);
11053 11053 ql_write_flash_byte(ha, 0xaaa, 0x20);
11054 11054 ql_write_flash_byte(ha, 0x555, 0xf0);
11055 11055 } else {
11056 11056 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11057 11057 ISP_FLASH_ENABLE);
11058 11058 WRT16_IO_REG(ha, ctrl_status, data);
11059 11059
11060 11060 /* Read/Reset Command Sequence */
11061 11061 ql_write_flash_byte(ha, 0x5555, 0xaa);
11062 11062 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11063 11063 ql_write_flash_byte(ha, 0x5555, 0xf0);
11064 11064 }
11065 11065 (void) ql_read_flash_byte(ha, 0);
11066 11066
11067 11067 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11068 11068 }
11069 11069
11070 11070 /*
11071 11071 * ql_flash_disable
11072 11072 * Disable flash and allow RISC to run.
11073 11073 *
11074 11074 * Input:
11075 11075 * ha = adapter state pointer.
11076 11076 *
11077 11077 * Context:
11078 11078 * Kernel context.
11079 11079 */
11080 11080 void
11081 11081 ql_flash_disable(ql_adapter_state_t *ha)
11082 11082 {
11083 11083 uint16_t data;
11084 11084
11085 11085 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11086 11086
11087 11087 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11088 11088 /*
11089 11089 * Lock the flash back up.
11090 11090 */
11091 11091 ql_write_flash_byte(ha, 0x555, 0x90);
11092 11092 ql_write_flash_byte(ha, 0x555, 0x0);
11093 11093
11094 11094 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11095 11095 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11096 11096 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11097 11097 ddi_put16(ha->sbus_fpga_dev_handle,
11098 11098 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11099 11099 } else {
11100 11100 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11101 11101 ~ISP_FLASH_ENABLE);
11102 11102 WRT16_IO_REG(ha, ctrl_status, data);
11103 11103 }
11104 11104
11105 11105 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11106 11106 }
11107 11107
11108 11108 /*
11109 11109 * ql_write_flash_byte
11110 11110 * Write byte to flash.
11111 11111 *
11112 11112 * Input:
11113 11113 * ha = adapter state pointer.
11114 11114 * addr = flash byte address.
11115 11115 * data = data to be written.
11116 11116 *
11117 11117 * Context:
11118 11118 * Kernel context.
11119 11119 */
11120 11120 void
11121 11121 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11122 11122 {
11123 11123 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11124 11124 ddi_put16(ha->sbus_fpga_dev_handle,
11125 11125 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11126 11126 LSW(addr));
11127 11127 ddi_put16(ha->sbus_fpga_dev_handle,
11128 11128 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11129 11129 MSW(addr));
11130 11130 ddi_put16(ha->sbus_fpga_dev_handle,
11131 11131 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11132 11132 (uint16_t)data);
11133 11133 } else {
11134 11134 uint16_t bank_select;
11135 11135
11136 11136 /* Setup bit 16 of flash address. */
11137 11137 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11138 11138
11139 11139 if (CFG_IST(ha, CFG_CTRL_6322)) {
11140 11140 bank_select = (uint16_t)(bank_select & ~0xf0);
11141 11141 bank_select = (uint16_t)(bank_select |
11142 11142 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11143 11143 WRT16_IO_REG(ha, ctrl_status, bank_select);
11144 11144 } else {
11145 11145 if (addr & BIT_16 && !(bank_select &
11146 11146 ISP_FLASH_64K_BANK)) {
11147 11147 bank_select = (uint16_t)(bank_select |
11148 11148 ISP_FLASH_64K_BANK);
11149 11149 WRT16_IO_REG(ha, ctrl_status, bank_select);
11150 11150 } else if (!(addr & BIT_16) && bank_select &
11151 11151 ISP_FLASH_64K_BANK) {
11152 11152 bank_select = (uint16_t)(bank_select &
11153 11153 ~ISP_FLASH_64K_BANK);
11154 11154 WRT16_IO_REG(ha, ctrl_status, bank_select);
11155 11155 }
11156 11156 }
11157 11157
11158 11158 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11159 11159 WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11160 11160 WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11161 11161 } else {
11162 11162 WRT16_IOMAP_REG(ha, flash_address, addr);
11163 11163 WRT16_IOMAP_REG(ha, flash_data, data);
11164 11164 }
11165 11165 }
11166 11166 }
11167 11167
11168 11168 /*
11169 11169 * ql_read_flash_byte
11170 11170 * Reads byte from flash, but must read a word from chip.
11171 11171 *
11172 11172 * Input:
11173 11173 * ha = adapter state pointer.
11174 11174 * addr = flash byte address.
11175 11175 *
11176 11176 * Returns:
11177 11177 * byte from flash.
11178 11178 *
11179 11179 * Context:
11180 11180 * Kernel context.
11181 11181 */
11182 11182 uint8_t
11183 11183 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11184 11184 {
11185 11185 uint8_t data;
11186 11186
11187 11187 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11188 11188 ddi_put16(ha->sbus_fpga_dev_handle,
11189 11189 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11190 11190 LSW(addr));
11191 11191 ddi_put16(ha->sbus_fpga_dev_handle,
11192 11192 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11193 11193 MSW(addr));
11194 11194 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11195 11195 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11196 11196 } else {
11197 11197 uint16_t bank_select;
11198 11198
11199 11199 /* Setup bit 16 of flash address. */
11200 11200 bank_select = RD16_IO_REG(ha, ctrl_status);
11201 11201 if (CFG_IST(ha, CFG_CTRL_6322)) {
11202 11202 bank_select = (uint16_t)(bank_select & ~0xf0);
11203 11203 bank_select = (uint16_t)(bank_select |
11204 11204 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11205 11205 WRT16_IO_REG(ha, ctrl_status, bank_select);
11206 11206 } else {
11207 11207 if (addr & BIT_16 &&
11208 11208 !(bank_select & ISP_FLASH_64K_BANK)) {
11209 11209 bank_select = (uint16_t)(bank_select |
11210 11210 ISP_FLASH_64K_BANK);
11211 11211 WRT16_IO_REG(ha, ctrl_status, bank_select);
11212 11212 } else if (!(addr & BIT_16) &&
11213 11213 bank_select & ISP_FLASH_64K_BANK) {
11214 11214 bank_select = (uint16_t)(bank_select &
11215 11215 ~ISP_FLASH_64K_BANK);
11216 11216 WRT16_IO_REG(ha, ctrl_status, bank_select);
11217 11217 }
11218 11218 }
11219 11219
11220 11220 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11221 11221 WRT16_IO_REG(ha, flash_address, addr);
11222 11222 data = (uint8_t)RD16_IO_REG(ha, flash_data);
11223 11223 } else {
11224 11224 WRT16_IOMAP_REG(ha, flash_address, addr);
11225 11225 data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11226 11226 }
11227 11227 }
11228 11228
11229 11229 return (data);
11230 11230 }
11231 11231
11232 11232 /*
11233 11233 * ql_24xx_flash_id
11234 11234 * Get flash IDs.
11235 11235 *
11236 11236 * Input:
11237 11237 * ha: adapter state pointer.
11238 11238 *
11239 11239 * Returns:
11240 11240 * ql local function return status code.
11241 11241 *
11242 11242 * Context:
11243 11243 * Kernel context.
11244 11244 */
11245 11245 int
11246 11246 ql_24xx_flash_id(ql_adapter_state_t *vha)
11247 11247 {
11248 11248 int rval;
11249 11249 uint32_t fdata = 0;
11250 11250 ql_adapter_state_t *ha = vha->pha;
11251 11251 ql_xioctl_t *xp = ha->xioctl;
11252 11252
11253 11253 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11254 11254
11255 11255 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);
11256 11256
11257 11257 if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
11258 11258 fdata = 0;
11259 11259 rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
11260 11260 (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
11261 11261 }
11262 11262
11263 11263 if (rval != QL_SUCCESS) {
11264 11264 EL(ha, "24xx read_flash failed=%xh\n", rval);
11265 11265 } else if (fdata != 0) {
11266 11266 xp->fdesc.flash_manuf = LSB(LSW(fdata));
11267 11267 xp->fdesc.flash_id = MSB(LSW(fdata));
11268 11268 xp->fdesc.flash_len = LSB(MSW(fdata));
11269 11269 } else {
11270 11270 xp->fdesc.flash_manuf = ATMEL_FLASH;
11271 11271 xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
11272 11272 xp->fdesc.flash_len = 0;
11273 11273 }
11274 11274
11275 11275 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11276 11276
11277 11277 return (rval);
11278 11278 }
11279 11279
11280 11280 /*
11281 11281 * ql_24xx_load_flash
11282 11282 * Loads flash.
11283 11283 *
11284 11284 * Input:
11285 11285 * ha = adapter state pointer.
11286 11286 * dp = data pointer.
11287 11287 * size = data length in bytes.
11288 11288 * faddr = 32bit word flash byte address.
11289 11289 *
11290 11290 * Returns:
11291 11291 * ql local function return status code.
11292 11292 *
11293 11293 * Context:
11294 11294 * Kernel context.
11295 11295 */
11296 11296 int
11297 11297 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11298 11298 uint32_t faddr)
11299 11299 {
11300 11300 int rval;
11301 11301 uint32_t cnt, rest_addr, fdata, wc;
11302 11302 dma_mem_t dmabuf = {0};
11303 11303 ql_adapter_state_t *ha = vha->pha;
11304 11304 ql_xioctl_t *xp = ha->xioctl;
11305 11305
11306 11306 QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11307 11307 ha->instance, faddr, size);
11308 11308
11309 11309 /* start address must be 32 bit word aligned */
11310 11310 if ((faddr & 0x3) != 0) {
11311 11311 EL(ha, "incorrect buffer size alignment\n");
11312 11312 return (QL_FUNCTION_PARAMETER_ERROR);
11313 11313 }
11314 11314
11315 11315 /* Allocate DMA buffer */
11316 11316 if (CFG_IST(ha, CFG_CTRL_2581)) {
11317 11317 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11318 11318 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11319 11319 QL_SUCCESS) {
11320 11320 EL(ha, "dma alloc failed, rval=%xh\n", rval);
11321 11321 return (rval);
11322 11322 }
11323 11323 }
11324 11324
11325 11325 GLOBAL_HW_LOCK();
11326 11326
11327 11327 /* Enable flash write */
11328 11328 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11329 11329 GLOBAL_HW_UNLOCK();
11330 11330 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11331 11331 ql_free_phys(ha, &dmabuf);
11332 11332 return (rval);
11333 11333 }
11334 11334
11335 11335 /* setup mask of address range within a sector */
11336 11336 rest_addr = (xp->fdesc.block_size - 1) >> 2;
11337 11337
11338 11338 faddr = faddr >> 2; /* flash gets 32 bit words */
11339 11339
11340 11340 /*
11341 11341 * Write data to flash.
11342 11342 */
11343 11343 cnt = 0;
11344 11344 size = (size + 3) >> 2; /* Round up & convert to dwords */
11345 11345
11346 11346 while (cnt < size) {
11347 11347 /* Beginning of a sector? */
11348 11348 if ((faddr & rest_addr) == 0) {
11349 11349 if (CFG_IST(ha, CFG_CTRL_8021)) {
11350 11350 fdata = ha->flash_data_addr | faddr;
11351 11351 rval = ql_8021_rom_erase(ha, fdata);
11352 11352 if (rval != QL_SUCCESS) {
11353 11353 EL(ha, "8021 erase sector status="
11354 11354 "%xh, start=%xh, end=%xh"
11355 11355 "\n", rval, fdata,
11356 11356 fdata + rest_addr);
11357 11357 break;
11358 11358 }
11359 11359 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11360 11360 fdata = ha->flash_data_addr | faddr;
11361 11361 rval = ql_flash_access(ha,
11362 11362 FAC_ERASE_SECTOR, fdata, fdata +
11363 11363 rest_addr, 0);
11364 11364 if (rval != QL_SUCCESS) {
11365 11365 EL(ha, "erase sector status="
11366 11366 "%xh, start=%xh, end=%xh"
11367 11367 "\n", rval, fdata,
11368 11368 fdata + rest_addr);
11369 11369 break;
11370 11370 }
11371 11371 } else {
11372 11372 fdata = (faddr & ~rest_addr) << 2;
11373 11373 fdata = (fdata & 0xff00) |
11374 11374 (fdata << 16 & 0xff0000) |
11375 11375 (fdata >> 16 & 0xff);
11376 11376
11377 11377 if (rest_addr == 0x1fff) {
11378 11378 /* 32kb sector block erase */
11379 11379 rval = ql_24xx_write_flash(ha,
11380 11380 FLASH_CONF_ADDR | 0x0352,
11381 11381 fdata);
11382 11382 } else {
11383 11383 /* 64kb sector block erase */
11384 11384 rval = ql_24xx_write_flash(ha,
11385 11385 FLASH_CONF_ADDR | 0x03d8,
11386 11386 fdata);
11387 11387 }
11388 11388 if (rval != QL_SUCCESS) {
11389 11389 EL(ha, "Unable to flash sector"
11390 11390 ": address=%xh\n", faddr);
11391 11391 break;
11392 11392 }
11393 11393 }
11394 11394 }
11395 11395
11396 11396 /* Write data */
11397 11397 if (CFG_IST(ha, CFG_CTRL_2581) &&
11398 11398 ((faddr & 0x3f) == 0)) {
11399 11399 /*
11400 11400 * Limit write up to sector boundary.
11401 11401 */
11402 11402 wc = ((~faddr & (rest_addr>>1)) + 1);
11403 11403
11404 11404 if (size - cnt < wc) {
11405 11405 wc = size - cnt;
11406 11406 }
11407 11407
11408 11408 ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11409 11409 (uint8_t *)dmabuf.bp, wc<<2,
11410 11410 DDI_DEV_AUTOINCR);
11411 11411
11412 11412 rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11413 11413 faddr, dmabuf.cookie.dmac_laddress, wc);
11414 11414 if (rval != QL_SUCCESS) {
11415 11415 EL(ha, "unable to dma to flash "
11416 11416 "address=%xh\n", faddr << 2);
11417 11417 break;
11418 11418 }
11419 11419
11420 11420 cnt += wc;
11421 11421 faddr += wc;
11422 11422 dp += wc << 2;
11423 11423 } else {
11424 11424 fdata = *dp++;
11425 11425 fdata |= *dp++ << 8;
11426 11426 fdata |= *dp++ << 16;
11427 11427 fdata |= *dp++ << 24;
11428 11428 rval = ql_24xx_write_flash(ha,
11429 11429 ha->flash_data_addr | faddr, fdata);
11430 11430 if (rval != QL_SUCCESS) {
11431 11431 EL(ha, "Unable to program flash "
11432 11432 "address=%xh data=%xh\n", faddr,
11433 11433 *dp);
11434 11434 break;
11435 11435 }
11436 11436 cnt++;
11437 11437 faddr++;
11438 11438
11439 11439 /* Allow other system activity. */
11440 11440 if (cnt % 0x1000 == 0) {
11441 11441 ql_delay(ha, 10000);
11442 11442 }
11443 11443 }
11444 11444 }
11445 11445
11446 11446 ql_24xx_protect_flash(ha);
11447 11447
11448 11448 ql_free_phys(ha, &dmabuf);
11449 11449
11450 11450 GLOBAL_HW_UNLOCK();
11451 11451
11452 11452 if (rval != QL_SUCCESS) {
11453 11453 EL(ha, "failed=%xh\n", rval);
11454 11454 } else {
11455 11455 /*EMPTY*/
11456 11456 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11457 11457 }
11458 11458 return (rval);
11459 11459 }
11460 11460
11461 11461 /*
11462 11462 * ql_24xx_read_flash
11463 11463 * Reads a 32bit word from ISP24xx NVRAM/FLASH.
11464 11464 *
11465 11465 * Input:
11466 11466 * ha: adapter state pointer.
11467 11467 * faddr: NVRAM/FLASH address.
11468 11468 * bp: data pointer.
11469 11469 *
11470 11470 * Returns:
11471 11471 * ql local function return status code.
11472 11472 *
11473 11473 * Context:
11474 11474 * Kernel context.
11475 11475 */
11476 11476 int
11477 11477 ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
11478 11478 {
11479 11479 uint32_t timer;
11480 11480 int rval = QL_SUCCESS;
11481 11481 ql_adapter_state_t *ha = vha->pha;
11482 11482
11483 11483 if (CFG_IST(ha, CFG_CTRL_8021)) {
11484 11484 if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
11485 11485 EL(ha, "8021 access error\n");
11486 11486 }
11487 11487 return (rval);
11488 11488 }
11489 11489
11490 11490 /* Clear access error flag */
11491 11491 WRT32_IO_REG(ha, ctrl_status,
11492 11492 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11493 11493
11494 11494 WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);
11495 11495
11496 11496 /* Wait for READ cycle to complete. */
11497 11497 for (timer = 300000; timer; timer--) {
11498 11498 if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
11499 11499 break;
11500 11500 }
11501 11501 drv_usecwait(10);
11502 11502 }
11503 11503
11504 11504 if (timer == 0) {
11505 11505 EL(ha, "failed, timeout\n");
11506 11506 rval = QL_FUNCTION_TIMEOUT;
11507 11507 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11508 11508 EL(ha, "failed, access error\n");
11509 11509 rval = QL_FUNCTION_FAILED;
11510 11510 }
11511 11511
11512 11512 *bp = RD32_IO_REG(ha, flash_data);
11513 11513
11514 11514 return (rval);
11515 11515 }
11516 11516
11517 11517 /*
11518 11518 * ql_24xx_write_flash
11519 11519 * Writes a 32bit word to ISP24xx NVRAM/FLASH.
11520 11520 *
11521 11521 * Input:
11522 11522 * ha: adapter state pointer.
11523 11523 * addr: NVRAM/FLASH address.
11524 11524 * value: data.
11525 11525 *
11526 11526 * Returns:
11527 11527 * ql local function return status code.
11528 11528 *
11529 11529 * Context:
11530 11530 * Kernel context.
11531 11531 */
11532 11532 int
11533 11533 ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
11534 11534 {
11535 11535 uint32_t timer, fdata;
11536 11536 int rval = QL_SUCCESS;
11537 11537 ql_adapter_state_t *ha = vha->pha;
11538 11538
11539 11539 if (CFG_IST(ha, CFG_CTRL_8021)) {
11540 11540 if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
11541 11541 EL(ha, "8021 access error\n");
11542 11542 }
11543 11543 return (rval);
11544 11544 }
11545 11545 /* Clear access error flag */
11546 11546 WRT32_IO_REG(ha, ctrl_status,
11547 11547 RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);
11548 11548
11549 11549 WRT32_IO_REG(ha, flash_data, data);
11550 11550 RD32_IO_REG(ha, flash_data); /* PCI Posting. */
11551 11551 WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);
11552 11552
11553 11553 /* Wait for Write cycle to complete. */
11554 11554 for (timer = 3000000; timer; timer--) {
11555 11555 if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
11556 11556 /* Check flash write in progress. */
11557 11557 if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
11558 11558 (void) ql_24xx_read_flash(ha,
11559 11559 FLASH_CONF_ADDR | 0x005, &fdata);
11560 11560 if (!(fdata & BIT_0)) {
11561 11561 break;
11562 11562 }
11563 11563 } else {
11564 11564 break;
11565 11565 }
11566 11566 }
11567 11567 drv_usecwait(10);
11568 11568 }
11569 11569 if (timer == 0) {
11570 11570 EL(ha, "failed, timeout\n");
11571 11571 rval = QL_FUNCTION_TIMEOUT;
11572 11572 } else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
11573 11573 EL(ha, "access error\n");
11574 11574 rval = QL_FUNCTION_FAILED;
11575 11575 }
11576 11576
11577 11577 return (rval);
11578 11578 }
11579 11579 /*
11580 11580 * ql_24xx_unprotect_flash
11581 11581 * Enable writes
11582 11582 *
11583 11583 * Input:
11584 11584 * ha: adapter state pointer.
11585 11585 *
11586 11586 * Returns:
11587 11587 * ql local function return status code.
11588 11588 *
11589 11589 * Context:
11590 11590 * Kernel context.
11591 11591 */
11592 11592 int
11593 11593 ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
11594 11594 {
11595 11595 int rval;
11596 11596 uint32_t fdata;
11597 11597 ql_adapter_state_t *ha = vha->pha;
11598 11598 ql_xioctl_t *xp = ha->xioctl;
11599 11599
11600 11600 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11601 11601
11602 11602 if (CFG_IST(ha, CFG_CTRL_8021)) {
11603 11603 (void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11604 11604 rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
11605 11605 if (rval != QL_SUCCESS) {
11606 11606 EL(ha, "8021 access error\n");
11607 11607 }
11608 11608 return (rval);
11609 11609 }
11610 11610 if (CFG_IST(ha, CFG_CTRL_81XX)) {
11611 11611 if (ha->task_daemon_flags & FIRMWARE_UP) {
11612 11612 if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
11613 11613 0)) != QL_SUCCESS) {
11614 11614 EL(ha, "status=%xh\n", rval);
11615 11615 }
11616 11616 QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
11617 11617 ha->instance);
11618 11618 return (rval);
11619 11619 }
11620 11620 } else {
11621 11621 /* Enable flash write. */
11622 11622 WRT32_IO_REG(ha, ctrl_status,
11623 11623 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
11624 11624 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
11625 11625 }
11626 11626
11627 11627 /*
11628 11628 * Remove block write protection (SST and ST) and
11629 11629 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
11630 11630 * Unprotect sectors.
11631 11631 */
11632 11632 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
11633 11633 xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);
11634 11634
11635 11635 if (xp->fdesc.unprotect_sector_cmd != 0) {
11636 11636 for (fdata = 0; fdata < 0x10; fdata++) {
11637 11637 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
11638 11638 0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
11639 11639 }
11640 11640
11641 11641 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11642 11642 xp->fdesc.unprotect_sector_cmd, 0x00400f);
11643 11643 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11644 11644 xp->fdesc.unprotect_sector_cmd, 0x00600f);
11645 11645 (void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
11646 11646 xp->fdesc.unprotect_sector_cmd, 0x00800f);
11647 11647 }
11648 11648
11649 11649 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11650 11650
11651 11651 return (QL_SUCCESS);
11652 11652 }
11653 11653
/*
 * ql_24xx_protect_flash
 *	Disable writes
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * ISP8021: protect via the ROM status register --
		 * write the enable bits first, then the disable
		 * (protect) bits; only the second write is checked.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/*
		 * ISP81xx with firmware running: use the firmware's
		 * Flash Access Control (FAC) write-protect command.
		 * If the firmware is down, fall through to the manual
		 * register sequence below (without the ISP_FLASH_ENABLE
		 * toggle, which is skipped for 81xx).
		 */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		/*
		 * Issue the part-specific protect-sector command for
		 * values 0-0xf -- presumably the low sector addresses;
		 * confirm against the flash part's data sheet.
		 */
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}
		/*
		 * NOTE(review): 0x00400f/0x00600f/0x00800f appear to be
		 * additional sector addresses to protect -- verify.
		 */
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? (status-register write, purpose unconfirmed) */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		/* No per-sector command: write protect bits directly. */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
11733 11733
11734 11734 /*
11735 11735 * ql_dump_firmware
11736 11736 * Save RISC code state information.
11737 11737 *
11738 11738 * Input:
11739 11739 * ha = adapter state pointer.
11740 11740 *
11741 11741 * Returns:
11742 11742 * QL local function return status code.
11743 11743 *
11744 11744 * Context:
11745 11745 * Kernel context.
11746 11746 */
11747 11747 static int
11748 11748 ql_dump_firmware(ql_adapter_state_t *vha)
11749 11749 {
11750 11750 int rval;
11751 11751 clock_t timer = drv_usectohz(30000000);
11752 11752 ql_adapter_state_t *ha = vha->pha;
11753 11753
11754 11754 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11755 11755
11756 11756 QL_DUMP_LOCK(ha);
11757 11757
11758 11758 if (ha->ql_dump_state & QL_DUMPING ||
11759 11759 (ha->ql_dump_state & QL_DUMP_VALID &&
11760 11760 !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
11761 11761 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11762 11762 QL_DUMP_UNLOCK(ha);
11763 11763 return (QL_SUCCESS);
11764 11764 }
11765 11765
11766 11766 QL_DUMP_UNLOCK(ha);
11767 11767
11768 11768 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
11769 11769
11770 11770 /*
11771 11771 * Wait for all outstanding commands to complete
11772 11772 */
11773 11773 (void) ql_wait_outstanding(ha);
11774 11774
11775 11775 /* Dump firmware. */
11776 11776 rval = ql_binary_fw_dump(ha, TRUE);
11777 11777
11778 11778 /* Do abort to force restart. */
11779 11779 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
11780 11780 EL(ha, "restarting, isp_abort_needed\n");
11781 11781
11782 11782 /* Acquire task daemon lock. */
11783 11783 TASK_DAEMON_LOCK(ha);
11784 11784
11785 11785 /* Wait for suspension to end. */
11786 11786 while (ha->task_daemon_flags & QL_SUSPENDED) {
11787 11787 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;
11788 11788
11789 11789 /* 30 seconds from now */
11790 11790 if (cv_reltimedwait(&ha->cv_dr_suspended,
11791 11791 &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
11792 11792 /*
11793 11793 * The timeout time 'timer' was
11794 11794 * reached without the condition
11795 11795 * being signaled.
11796 11796 */
11797 11797 break;
11798 11798 }
11799 11799 }
11800 11800
11801 11801 /* Release task daemon lock. */
11802 11802 TASK_DAEMON_UNLOCK(ha);
11803 11803
11804 11804 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
11805 11805 /*EMPTY*/
11806 11806 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11807 11807 } else {
11808 11808 EL(ha, "failed, rval = %xh\n", rval);
11809 11809 }
11810 11810 return (rval);
11811 11811 }
11812 11812
/*
 * ql_binary_fw_dump
 *	Dumps binary data from firmware.
 *
 * Input:
 *	ha = adapter state pointer.
 *	lock_needed = mailbox lock needed.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* ISP8021 firmware dumps are not taken through this path. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		EL(ha, "8021 not supported\n");
		return (QL_NOT_SUPPORTED);
	}

	QL_DUMP_LOCK(ha);

	/*
	 * Refuse to start a new dump while one is in progress or while
	 * a valid dump is still waiting to be uploaded.
	 */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Mark the dump in progress before dropping the lock. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);
		/*
		 * Wait timeout is the current mailbox command's timeout
		 * plus two seconds of slack.
		 * NOTE(review): ha->mcp is dereferenced here before it
		 * is (re)assigned below -- presumably it still points
		 * at the in-flight command; confirm it cannot be NULL
		 * on this path.
		 */
		timer = drv_sectohz(ha->mcp->timeout + 2);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* mcp->timeout + 2 seconds from now */
			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer, TR_CLOCK_TICK) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		/* Claim the mailbox for the dump, 120 second timeout. */
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Dump buffer size depends on the chip family's dump layout. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* On failure discard the buffer and clear all dump state. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/* Mark the dump valid and awaiting upload. */
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11966 11966
11967 11967 /*
11968 11968 * ql_ascii_fw_dump
11969 11969 * Converts firmware binary dump to ascii.
11970 11970 *
11971 11971 * Input:
11972 11972 * ha = adapter state pointer.
11973 11973 * bptr = buffer pointer.
11974 11974 *
11975 11975 * Returns:
11976 11976 * Amount of data buffer used.
11977 11977 *
11978 11978 * Context:
11979 11979 * Kernel context.
11980 11980 */
11981 11981 size_t
11982 11982 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11983 11983 {
11984 11984 uint32_t cnt;
11985 11985 caddr_t bp;
11986 11986 int mbox_cnt;
11987 11987 ql_adapter_state_t *ha = vha->pha;
11988 11988 ql_fw_dump_t *fw = ha->ql_dump_ptr;
11989 11989
11990 11990 if (CFG_IST(ha, CFG_CTRL_2422)) {
11991 11991 return (ql_24xx_ascii_fw_dump(ha, bufp));
11992 11992 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11993 11993 return (ql_2581_ascii_fw_dump(ha, bufp));
11994 11994 }
11995 11995
11996 11996 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11997 11997
11998 11998 if (CFG_IST(ha, CFG_CTRL_2300)) {
11999 11999 (void) sprintf(bufp, "\nISP 2300IP ");
12000 12000 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
12001 12001 (void) sprintf(bufp, "\nISP 6322FLX ");
12002 12002 } else {
12003 12003 (void) sprintf(bufp, "\nISP 2200IP ");
12004 12004 }
12005 12005
12006 12006 bp = bufp + strlen(bufp);
12007 12007 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
12008 12008 ha->fw_major_version, ha->fw_minor_version,
12009 12009 ha->fw_subminor_version);
12010 12010
12011 12011 (void) strcat(bufp, "\nPBIU Registers:");
12012 12012 bp = bufp + strlen(bufp);
12013 12013 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
12014 12014 if (cnt % 8 == 0) {
12015 12015 *bp++ = '\n';
12016 12016 }
12017 12017 (void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
12018 12018 bp = bp + 6;
12019 12019 }
12020 12020
12021 12021 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12022 12022 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
12023 12023 "registers:");
12024 12024 bp = bufp + strlen(bufp);
12025 12025 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
12026 12026 if (cnt % 8 == 0) {
12027 12027 *bp++ = '\n';
12028 12028 }
12029 12029 (void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
12030 12030 bp = bp + 6;
12031 12031 }
12032 12032 }
12033 12033
12034 12034 (void) strcat(bp, "\n\nMailbox Registers:");
12035 12035 bp = bufp + strlen(bufp);
12036 12036 mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
12037 12037 for (cnt = 0; cnt < mbox_cnt; cnt++) {
12038 12038 if (cnt % 8 == 0) {
12039 12039 *bp++ = '\n';
12040 12040 }
12041 12041 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12042 12042 bp = bp + 6;
12043 12043 }
12044 12044
12045 12045 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12046 12046 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
12047 12047 bp = bufp + strlen(bufp);
12048 12048 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12049 12049 if (cnt % 8 == 0) {
12050 12050 *bp++ = '\n';
12051 12051 }
12052 12052 (void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
12053 12053 bp = bp + 6;
12054 12054 }
12055 12055 }
12056 12056
12057 12057 (void) strcat(bp, "\n\nDMA Registers:");
12058 12058 bp = bufp + strlen(bufp);
12059 12059 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12060 12060 if (cnt % 8 == 0) {
12061 12061 *bp++ = '\n';
12062 12062 }
12063 12063 (void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
12064 12064 bp = bp + 6;
12065 12065 }
12066 12066
12067 12067 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12068 12068 bp = bufp + strlen(bufp);
12069 12069 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12070 12070 if (cnt % 8 == 0) {
12071 12071 *bp++ = '\n';
12072 12072 }
12073 12073 (void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
12074 12074 bp = bp + 6;
12075 12075 }
12076 12076
12077 12077 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12078 12078 bp = bufp + strlen(bufp);
12079 12079 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12080 12080 if (cnt % 8 == 0) {
12081 12081 *bp++ = '\n';
12082 12082 }
12083 12083 (void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
12084 12084 bp = bp + 6;
12085 12085 }
12086 12086
12087 12087 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12088 12088 bp = bufp + strlen(bufp);
12089 12089 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12090 12090 if (cnt % 8 == 0) {
12091 12091 *bp++ = '\n';
12092 12092 }
12093 12093 (void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
12094 12094 bp = bp + 6;
12095 12095 }
12096 12096
12097 12097 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12098 12098 bp = bufp + strlen(bufp);
12099 12099 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12100 12100 if (cnt % 8 == 0) {
12101 12101 *bp++ = '\n';
12102 12102 }
12103 12103 (void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
12104 12104 bp = bp + 6;
12105 12105 }
12106 12106
12107 12107 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12108 12108 bp = bufp + strlen(bufp);
12109 12109 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12110 12110 if (cnt % 8 == 0) {
12111 12111 *bp++ = '\n';
12112 12112 }
12113 12113 (void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
12114 12114 bp = bp + 6;
12115 12115 }
12116 12116
12117 12117 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12118 12118 bp = bufp + strlen(bufp);
12119 12119 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12120 12120 if (cnt % 8 == 0) {
12121 12121 *bp++ = '\n';
12122 12122 }
12123 12123 (void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
12124 12124 bp = bp + 6;
12125 12125 }
12126 12126
12127 12127 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12128 12128 bp = bufp + strlen(bufp);
12129 12129 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12130 12130 if (cnt % 8 == 0) {
12131 12131 *bp++ = '\n';
12132 12132 }
12133 12133 (void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
12134 12134 bp = bp + 6;
12135 12135 }
12136 12136
12137 12137 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12138 12138 bp = bufp + strlen(bufp);
12139 12139 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12140 12140 if (cnt % 8 == 0) {
12141 12141 *bp++ = '\n';
12142 12142 }
12143 12143 (void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
12144 12144 bp = bp + 6;
12145 12145 }
12146 12146
12147 12147 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12148 12148 bp = bufp + strlen(bufp);
12149 12149 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12150 12150 if (cnt % 8 == 0) {
12151 12151 *bp++ = '\n';
12152 12152 }
12153 12153 (void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
12154 12154 bp = bp + 6;
12155 12155 }
12156 12156
12157 12157 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12158 12158 bp = bufp + strlen(bufp);
12159 12159 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12160 12160 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12161 12161 CFG_CTRL_6322)) == 0))) {
12162 12162 break;
12163 12163 }
12164 12164 if (cnt % 8 == 0) {
12165 12165 *bp++ = '\n';
12166 12166 }
12167 12167 (void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
12168 12168 bp = bp + 6;
12169 12169 }
12170 12170
12171 12171 (void) strcat(bp, "\n\nFPM B0 Registers:");
12172 12172 bp = bufp + strlen(bufp);
12173 12173 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12174 12174 if (cnt % 8 == 0) {
12175 12175 *bp++ = '\n';
12176 12176 }
12177 12177 (void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
12178 12178 bp = bp + 6;
12179 12179 }
12180 12180
12181 12181 (void) strcat(bp, "\n\nFPM B1 Registers:");
12182 12182 bp = bufp + strlen(bufp);
12183 12183 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12184 12184 if (cnt % 8 == 0) {
12185 12185 *bp++ = '\n';
12186 12186 }
12187 12187 (void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
12188 12188 bp = bp + 6;
12189 12189 }
12190 12190
12191 12191 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12192 12192 (void) strcat(bp, "\n\nCode RAM Dump:");
12193 12193 bp = bufp + strlen(bufp);
12194 12194 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12195 12195 if (cnt % 8 == 0) {
12196 12196 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12197 12197 bp = bp + 8;
12198 12198 }
12199 12199 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12200 12200 bp = bp + 6;
12201 12201 }
12202 12202
12203 12203 (void) strcat(bp, "\n\nStack RAM Dump:");
12204 12204 bp = bufp + strlen(bufp);
12205 12205 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12206 12206 if (cnt % 8 == 0) {
12207 12207 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12208 12208 bp = bp + 8;
12209 12209 }
12210 12210 (void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
12211 12211 bp = bp + 6;
12212 12212 }
12213 12213
12214 12214 (void) strcat(bp, "\n\nData RAM Dump:");
12215 12215 bp = bufp + strlen(bufp);
12216 12216 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12217 12217 if (cnt % 8 == 0) {
12218 12218 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12219 12219 bp = bp + 8;
12220 12220 }
12221 12221 (void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
12222 12222 bp = bp + 6;
12223 12223 }
12224 12224 } else {
12225 12225 (void) strcat(bp, "\n\nRISC SRAM:");
12226 12226 bp = bufp + strlen(bufp);
12227 12227 for (cnt = 0; cnt < 0xf000; cnt++) {
12228 12228 if (cnt % 8 == 0) {
12229 12229 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12230 12230 bp = bp + 7;
12231 12231 }
12232 12232 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12233 12233 bp = bp + 6;
12234 12234 }
12235 12235 }
12236 12236
12237 12237 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12238 12238 bp += strlen(bp);
12239 12239
12240 12240 (void) sprintf(bp, "\n\nRequest Queue");
12241 12241 bp += strlen(bp);
12242 12242 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12243 12243 if (cnt % 8 == 0) {
12244 12244 (void) sprintf(bp, "\n%08x: ", cnt);
12245 12245 bp += strlen(bp);
12246 12246 }
12247 12247 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12248 12248 bp += strlen(bp);
12249 12249 }
12250 12250
12251 12251 (void) sprintf(bp, "\n\nResponse Queue");
12252 12252 bp += strlen(bp);
12253 12253 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12254 12254 if (cnt % 8 == 0) {
12255 12255 (void) sprintf(bp, "\n%08x: ", cnt);
12256 12256 bp += strlen(bp);
12257 12257 }
12258 12258 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12259 12259 bp += strlen(bp);
12260 12260 }
12261 12261
12262 12262 (void) sprintf(bp, "\n");
12263 12263
12264 12264 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12265 12265
12266 12266 return (strlen(bufp));
12267 12267 }
12268 12268
12269 12269 /*
12270 12270 * ql_24xx_ascii_fw_dump
12271 12271 * Converts ISP24xx firmware binary dump to ascii.
12272 12272 *
12273 12273 * Input:
12274 12274 * ha = adapter state pointer.
12275 12275 * bptr = buffer pointer.
12276 12276 *
12277 12277 * Returns:
12278 12278 * Amount of data buffer used.
12279 12279 *
12280 12280 * Context:
12281 12281 * Kernel context.
12282 12282 */
12283 12283 static size_t
12284 12284 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12285 12285 {
12286 12286 uint32_t cnt;
12287 12287 caddr_t bp = bufp;
12288 12288 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12289 12289
12290 12290 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12291 12291
12292 12292 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12293 12293 ha->fw_major_version, ha->fw_minor_version,
12294 12294 ha->fw_subminor_version, ha->fw_attributes);
12295 12295 bp += strlen(bp);
12296 12296
12297 12297 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12298 12298
12299 12299 (void) strcat(bp, "\nHost Interface Registers");
12300 12300 bp += strlen(bp);
12301 12301 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12302 12302 if (cnt % 8 == 0) {
12303 12303 (void) sprintf(bp++, "\n");
12304 12304 }
12305 12305
12306 12306 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12307 12307 bp += 9;
12308 12308 }
12309 12309
12310 12310 (void) sprintf(bp, "\n\nMailbox Registers");
12311 12311 bp += strlen(bp);
12312 12312 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12313 12313 if (cnt % 16 == 0) {
12314 12314 (void) sprintf(bp++, "\n");
12315 12315 }
12316 12316
12317 12317 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12318 12318 bp += 5;
12319 12319 }
12320 12320
12321 12321 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12322 12322 bp += strlen(bp);
12323 12323 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12324 12324 if (cnt % 8 == 0) {
12325 12325 (void) sprintf(bp++, "\n");
12326 12326 }
12327 12327
12328 12328 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12329 12329 bp += 9;
12330 12330 }
12331 12331
12332 12332 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12333 12333 bp += strlen(bp);
12334 12334 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12335 12335 if (cnt % 8 == 0) {
12336 12336 (void) sprintf(bp++, "\n");
12337 12337 }
12338 12338
12339 12339 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12340 12340 bp += 9;
12341 12341 }
12342 12342
12343 12343 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12344 12344 bp += strlen(bp);
12345 12345 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12346 12346 if (cnt % 8 == 0) {
12347 12347 (void) sprintf(bp++, "\n");
12348 12348 }
12349 12349
12350 12350 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12351 12351 bp += 9;
12352 12352 }
12353 12353
12354 12354 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12355 12355 bp += strlen(bp);
12356 12356 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12357 12357 if (cnt % 8 == 0) {
12358 12358 (void) sprintf(bp++, "\n");
12359 12359 }
12360 12360
12361 12361 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12362 12362 bp += 9;
12363 12363 }
12364 12364
12365 12365 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12366 12366 bp += strlen(bp);
12367 12367 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12368 12368 if (cnt % 8 == 0) {
12369 12369 (void) sprintf(bp++, "\n");
12370 12370 }
12371 12371
12372 12372 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12373 12373 bp += 9;
12374 12374 }
12375 12375
12376 12376 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12377 12377 bp += strlen(bp);
12378 12378 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12379 12379 if (cnt % 8 == 0) {
12380 12380 (void) sprintf(bp++, "\n");
12381 12381 }
12382 12382
12383 12383 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12384 12384 bp += 9;
12385 12385 }
12386 12386
12387 12387 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12388 12388 bp += strlen(bp);
12389 12389 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12390 12390 if (cnt % 8 == 0) {
12391 12391 (void) sprintf(bp++, "\n");
12392 12392 }
12393 12393
12394 12394 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12395 12395 bp += 9;
12396 12396 }
12397 12397
12398 12398 (void) sprintf(bp, "\n\nCommand DMA Registers");
12399 12399 bp += strlen(bp);
12400 12400 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12401 12401 if (cnt % 8 == 0) {
12402 12402 (void) sprintf(bp++, "\n");
12403 12403 }
12404 12404
12405 12405 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12406 12406 bp += 9;
12407 12407 }
12408 12408
12409 12409 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12410 12410 bp += strlen(bp);
12411 12411 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12412 12412 if (cnt % 8 == 0) {
12413 12413 (void) sprintf(bp++, "\n");
12414 12414 }
12415 12415
12416 12416 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12417 12417 bp += 9;
12418 12418 }
12419 12419
12420 12420 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12421 12421 bp += strlen(bp);
12422 12422 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12423 12423 if (cnt % 8 == 0) {
12424 12424 (void) sprintf(bp++, "\n");
12425 12425 }
12426 12426
12427 12427 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12428 12428 bp += 9;
12429 12429 }
12430 12430
12431 12431 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12432 12432 bp += strlen(bp);
12433 12433 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12434 12434 if (cnt % 8 == 0) {
12435 12435 (void) sprintf(bp++, "\n");
12436 12436 }
12437 12437
12438 12438 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12439 12439 bp += 9;
12440 12440 }
12441 12441
12442 12442 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12443 12443 bp += strlen(bp);
12444 12444 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12445 12445 if (cnt % 8 == 0) {
12446 12446 (void) sprintf(bp++, "\n");
12447 12447 }
12448 12448
12449 12449 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12450 12450 bp += 9;
12451 12451 }
12452 12452
12453 12453 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12454 12454 bp += strlen(bp);
12455 12455 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12456 12456 if (cnt % 8 == 0) {
12457 12457 (void) sprintf(bp++, "\n");
12458 12458 }
12459 12459
12460 12460 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12461 12461 bp += 9;
12462 12462 }
12463 12463
12464 12464 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12465 12465 bp += strlen(bp);
12466 12466 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12467 12467 if (cnt % 8 == 0) {
12468 12468 (void) sprintf(bp++, "\n");
12469 12469 }
12470 12470
12471 12471 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12472 12472 bp += 9;
12473 12473 }
12474 12474
12475 12475 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12476 12476 bp += strlen(bp);
12477 12477 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12478 12478 if (cnt % 8 == 0) {
12479 12479 (void) sprintf(bp++, "\n");
12480 12480 }
12481 12481
12482 12482 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12483 12483 bp += 9;
12484 12484 }
12485 12485
12486 12486 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12487 12487 bp += strlen(bp);
12488 12488 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12489 12489 if (cnt % 8 == 0) {
12490 12490 (void) sprintf(bp++, "\n");
12491 12491 }
12492 12492
12493 12493 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12494 12494 bp += 9;
12495 12495 }
12496 12496
12497 12497 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12498 12498 bp += strlen(bp);
12499 12499 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12500 12500 if (cnt % 8 == 0) {
12501 12501 (void) sprintf(bp++, "\n");
12502 12502 }
12503 12503
12504 12504 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12505 12505 bp += 9;
12506 12506 }
12507 12507
12508 12508 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12509 12509 bp += strlen(bp);
12510 12510 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12511 12511 if (cnt % 8 == 0) {
12512 12512 (void) sprintf(bp++, "\n");
12513 12513 }
12514 12514
12515 12515 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12516 12516 bp += 9;
12517 12517 }
12518 12518
12519 12519 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12520 12520 bp += strlen(bp);
12521 12521 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12522 12522 if (cnt % 8 == 0) {
12523 12523 (void) sprintf(bp++, "\n");
12524 12524 }
12525 12525
12526 12526 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12527 12527 bp += 9;
12528 12528 }
12529 12529
12530 12530 (void) sprintf(bp, "\n\nRISC GP Registers");
12531 12531 bp += strlen(bp);
12532 12532 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12533 12533 if (cnt % 8 == 0) {
12534 12534 (void) sprintf(bp++, "\n");
12535 12535 }
12536 12536
12537 12537 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12538 12538 bp += 9;
12539 12539 }
12540 12540
12541 12541 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12542 12542 bp += strlen(bp);
12543 12543 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12544 12544 if (cnt % 8 == 0) {
12545 12545 (void) sprintf(bp++, "\n");
12546 12546 }
12547 12547
12548 12548 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12549 12549 bp += 9;
12550 12550 }
12551 12551
12552 12552 (void) sprintf(bp, "\n\nLMC Registers");
12553 12553 bp += strlen(bp);
12554 12554 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12555 12555 if (cnt % 8 == 0) {
12556 12556 (void) sprintf(bp++, "\n");
12557 12557 }
12558 12558
12559 12559 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12560 12560 bp += 9;
12561 12561 }
12562 12562
12563 12563 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12564 12564 bp += strlen(bp);
12565 12565 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12566 12566 if (cnt % 8 == 0) {
12567 12567 (void) sprintf(bp++, "\n");
12568 12568 }
12569 12569
12570 12570 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12571 12571 bp += 9;
12572 12572 }
12573 12573
12574 12574 (void) sprintf(bp, "\n\nFB Hardware Registers");
12575 12575 bp += strlen(bp);
12576 12576 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12577 12577 if (cnt % 8 == 0) {
12578 12578 (void) sprintf(bp++, "\n");
12579 12579 }
12580 12580
12581 12581 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12582 12582 bp += 9;
12583 12583 }
12584 12584
12585 12585 (void) sprintf(bp, "\n\nCode RAM");
12586 12586 bp += strlen(bp);
12587 12587 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12588 12588 if (cnt % 8 == 0) {
12589 12589 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12590 12590 bp += 11;
12591 12591 }
12592 12592
12593 12593 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12594 12594 bp += 9;
12595 12595 }
12596 12596
12597 12597 (void) sprintf(bp, "\n\nExternal Memory");
12598 12598 bp += strlen(bp);
12599 12599 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12600 12600 if (cnt % 8 == 0) {
12601 12601 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12602 12602 bp += 11;
12603 12603 }
12604 12604 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12605 12605 bp += 9;
12606 12606 }
12607 12607
12608 12608 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12609 12609 bp += strlen(bp);
12610 12610
12611 12611 (void) sprintf(bp, "\n\nRequest Queue");
12612 12612 bp += strlen(bp);
12613 12613 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12614 12614 if (cnt % 8 == 0) {
12615 12615 (void) sprintf(bp, "\n%08x: ", cnt);
12616 12616 bp += strlen(bp);
12617 12617 }
12618 12618 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12619 12619 bp += strlen(bp);
12620 12620 }
12621 12621
12622 12622 (void) sprintf(bp, "\n\nResponse Queue");
12623 12623 bp += strlen(bp);
12624 12624 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12625 12625 if (cnt % 8 == 0) {
12626 12626 (void) sprintf(bp, "\n%08x: ", cnt);
12627 12627 bp += strlen(bp);
12628 12628 }
12629 12629 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12630 12630 bp += strlen(bp);
12631 12631 }
12632 12632
12633 12633 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12634 12634 (ha->fwexttracebuf.bp != NULL)) {
12635 12635 uint32_t cnt_b = 0;
12636 12636 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12637 12637
12638 12638 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12639 12639 bp += strlen(bp);
12640 12640 /* show data address as a byte address, data as long words */
12641 12641 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12642 12642 cnt_b = cnt * 4;
12643 12643 if (cnt_b % 32 == 0) {
12644 12644 (void) sprintf(bp, "\n%08x: ",
12645 12645 (int)(w64 + cnt_b));
12646 12646 bp += 11;
12647 12647 }
12648 12648 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12649 12649 bp += 9;
12650 12650 }
12651 12651 }
12652 12652
12653 12653 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12654 12654 (ha->fwfcetracebuf.bp != NULL)) {
12655 12655 uint32_t cnt_b = 0;
12656 12656 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12657 12657
12658 12658 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12659 12659 bp += strlen(bp);
12660 12660 /* show data address as a byte address, data as long words */
12661 12661 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12662 12662 cnt_b = cnt * 4;
12663 12663 if (cnt_b % 32 == 0) {
12664 12664 (void) sprintf(bp, "\n%08x: ",
12665 12665 (int)(w64 + cnt_b));
12666 12666 bp += 11;
12667 12667 }
12668 12668 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12669 12669 bp += 9;
12670 12670 }
12671 12671 }
12672 12672
12673 12673 (void) sprintf(bp, "\n\n");
12674 12674 bp += strlen(bp);
12675 12675
12676 12676 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12677 12677
12678 12678 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12679 12679
12680 12680 return (cnt);
12681 12681 }
12682 12682
12683 12683 /*
12684 12684 * ql_2581_ascii_fw_dump
12685 12685 * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12686 12686 *
12687 12687 * Input:
12688 12688 * ha = adapter state pointer.
12689 12689 * bptr = buffer pointer.
12690 12690 *
12691 12691 * Returns:
12692 12692 * Amount of data buffer used.
12693 12693 *
12694 12694 * Context:
12695 12695 * Kernel context.
12696 12696 */
12697 12697 static size_t
12698 12698 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12699 12699 {
12700 12700 uint32_t cnt;
12701 12701 uint32_t cnt1;
12702 12702 caddr_t bp = bufp;
12703 12703 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12704 12704
12705 12705 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12706 12706
12707 12707 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12708 12708 ha->fw_major_version, ha->fw_minor_version,
12709 12709 ha->fw_subminor_version, ha->fw_attributes);
12710 12710 bp += strlen(bp);
12711 12711
12712 12712 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12713 12713 bp += strlen(bp);
12714 12714
12715 12715 (void) sprintf(bp, "\nHostRisc Registers");
12716 12716 bp += strlen(bp);
12717 12717 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12718 12718 if (cnt % 8 == 0) {
12719 12719 (void) sprintf(bp++, "\n");
12720 12720 }
12721 12721 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12722 12722 bp += 9;
12723 12723 }
12724 12724
12725 12725 (void) sprintf(bp, "\n\nPCIe Registers");
12726 12726 bp += strlen(bp);
12727 12727 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12728 12728 if (cnt % 8 == 0) {
12729 12729 (void) sprintf(bp++, "\n");
12730 12730 }
12731 12731 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12732 12732 bp += 9;
12733 12733 }
12734 12734
12735 12735 (void) strcat(bp, "\n\nHost Interface Registers");
12736 12736 bp += strlen(bp);
12737 12737 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12738 12738 if (cnt % 8 == 0) {
12739 12739 (void) sprintf(bp++, "\n");
12740 12740 }
12741 12741 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12742 12742 bp += 9;
12743 12743 }
12744 12744
12745 12745 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12746 12746 bp += strlen(bp);
12747 12747 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12748 12748 if (cnt % 8 == 0) {
12749 12749 (void) sprintf(bp++, "\n");
12750 12750 }
12751 12751 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12752 12752 bp += 9;
12753 12753 }
12754 12754
12755 12755 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12756 12756 fw->risc_io);
12757 12757 bp += strlen(bp);
12758 12758
12759 12759 (void) sprintf(bp, "\n\nMailbox Registers");
12760 12760 bp += strlen(bp);
12761 12761 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12762 12762 if (cnt % 16 == 0) {
12763 12763 (void) sprintf(bp++, "\n");
12764 12764 }
12765 12765 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12766 12766 bp += 5;
12767 12767 }
12768 12768
12769 12769 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12770 12770 bp += strlen(bp);
12771 12771 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12772 12772 if (cnt % 8 == 0) {
12773 12773 (void) sprintf(bp++, "\n");
12774 12774 }
12775 12775 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12776 12776 bp += 9;
12777 12777 }
12778 12778
12779 12779 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12780 12780 bp += strlen(bp);
12781 12781 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12782 12782 if (cnt % 8 == 0) {
12783 12783 (void) sprintf(bp++, "\n");
12784 12784 }
12785 12785 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12786 12786 bp += 9;
12787 12787 }
12788 12788
12789 12789 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12790 12790 bp += strlen(bp);
12791 12791 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12792 12792 if (cnt % 8 == 0) {
12793 12793 (void) sprintf(bp++, "\n");
12794 12794 }
12795 12795 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12796 12796 bp += 9;
12797 12797 }
12798 12798
12799 12799 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12800 12800 bp += strlen(bp);
12801 12801 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12802 12802 if (cnt % 8 == 0) {
12803 12803 (void) sprintf(bp++, "\n");
12804 12804 }
12805 12805 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12806 12806 bp += 9;
12807 12807 }
12808 12808
12809 12809 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12810 12810 bp += strlen(bp);
12811 12811 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12812 12812 if (cnt % 8 == 0) {
12813 12813 (void) sprintf(bp++, "\n");
12814 12814 }
12815 12815 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12816 12816 bp += 9;
12817 12817 }
12818 12818
12819 12819 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12820 12820 bp += strlen(bp);
12821 12821 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12822 12822 if (cnt % 8 == 0) {
12823 12823 (void) sprintf(bp++, "\n");
12824 12824 }
12825 12825 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12826 12826 bp += 9;
12827 12827 }
12828 12828
12829 12829 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12830 12830 bp += strlen(bp);
12831 12831 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12832 12832 if (cnt % 8 == 0) {
12833 12833 (void) sprintf(bp++, "\n");
12834 12834 }
12835 12835 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12836 12836 bp += 9;
12837 12837 }
12838 12838
12839 12839 (void) sprintf(bp, "\n\nASEQ GP Registers");
12840 12840 bp += strlen(bp);
12841 12841 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12842 12842 if (cnt % 8 == 0) {
12843 12843 (void) sprintf(bp++, "\n");
12844 12844 }
12845 12845 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12846 12846 bp += 9;
12847 12847 }
12848 12848
12849 12849 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12850 12850 bp += strlen(bp);
12851 12851 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12852 12852 if (cnt % 8 == 0) {
12853 12853 (void) sprintf(bp++, "\n");
12854 12854 }
12855 12855 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12856 12856 bp += 9;
12857 12857 }
12858 12858
12859 12859 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12860 12860 bp += strlen(bp);
12861 12861 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12862 12862 if (cnt % 8 == 0) {
12863 12863 (void) sprintf(bp++, "\n");
12864 12864 }
12865 12865 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12866 12866 bp += 9;
12867 12867 }
12868 12868
12869 12869 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12870 12870 bp += strlen(bp);
12871 12871 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12872 12872 if (cnt % 8 == 0) {
12873 12873 (void) sprintf(bp++, "\n");
12874 12874 }
12875 12875 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12876 12876 bp += 9;
12877 12877 }
12878 12878
12879 12879 (void) sprintf(bp, "\n\nCommand DMA Registers");
12880 12880 bp += strlen(bp);
12881 12881 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12882 12882 if (cnt % 8 == 0) {
12883 12883 (void) sprintf(bp++, "\n");
12884 12884 }
12885 12885 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12886 12886 bp += 9;
12887 12887 }
12888 12888
12889 12889 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12890 12890 bp += strlen(bp);
12891 12891 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12892 12892 if (cnt % 8 == 0) {
12893 12893 (void) sprintf(bp++, "\n");
12894 12894 }
12895 12895 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12896 12896 bp += 9;
12897 12897 }
12898 12898
12899 12899 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12900 12900 bp += strlen(bp);
12901 12901 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12902 12902 if (cnt % 8 == 0) {
12903 12903 (void) sprintf(bp++, "\n");
12904 12904 }
12905 12905 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12906 12906 bp += 9;
12907 12907 }
12908 12908
12909 12909 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12910 12910 bp += strlen(bp);
12911 12911 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12912 12912 if (cnt % 8 == 0) {
12913 12913 (void) sprintf(bp++, "\n");
12914 12914 }
12915 12915 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12916 12916 bp += 9;
12917 12917 }
12918 12918
12919 12919 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12920 12920 bp += strlen(bp);
12921 12921 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12922 12922 if (cnt % 8 == 0) {
12923 12923 (void) sprintf(bp++, "\n");
12924 12924 }
12925 12925 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12926 12926 bp += 9;
12927 12927 }
12928 12928
12929 12929 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12930 12930 bp += strlen(bp);
12931 12931 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12932 12932 if (cnt % 8 == 0) {
12933 12933 (void) sprintf(bp++, "\n");
12934 12934 }
12935 12935 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12936 12936 bp += 9;
12937 12937 }
12938 12938
12939 12939 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12940 12940 bp += strlen(bp);
12941 12941 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12942 12942 if (cnt % 8 == 0) {
12943 12943 (void) sprintf(bp++, "\n");
12944 12944 }
12945 12945 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12946 12946 bp += 9;
12947 12947 }
12948 12948
12949 12949 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12950 12950 bp += strlen(bp);
12951 12951 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12952 12952 if (cnt % 8 == 0) {
12953 12953 (void) sprintf(bp++, "\n");
12954 12954 }
12955 12955 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12956 12956 bp += 9;
12957 12957 }
12958 12958
12959 12959 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12960 12960 bp += strlen(bp);
12961 12961 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12962 12962 if (cnt % 8 == 0) {
12963 12963 (void) sprintf(bp++, "\n");
12964 12964 }
12965 12965 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12966 12966 bp += 9;
12967 12967 }
12968 12968
12969 12969 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12970 12970 bp += strlen(bp);
12971 12971 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12972 12972 if (cnt % 8 == 0) {
12973 12973 (void) sprintf(bp++, "\n");
12974 12974 }
12975 12975 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12976 12976 bp += 9;
12977 12977 }
12978 12978
12979 12979 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12980 12980 bp += strlen(bp);
12981 12981 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12982 12982 if (cnt % 8 == 0) {
12983 12983 (void) sprintf(bp++, "\n");
12984 12984 }
12985 12985 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12986 12986 bp += 9;
12987 12987 }
12988 12988
12989 12989 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12990 12990 bp += strlen(bp);
12991 12991 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12992 12992 if (cnt % 8 == 0) {
12993 12993 (void) sprintf(bp++, "\n");
12994 12994 }
12995 12995 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12996 12996 bp += 9;
12997 12997 }
12998 12998
12999 12999 (void) sprintf(bp, "\n\nRISC GP Registers");
13000 13000 bp += strlen(bp);
13001 13001 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
13002 13002 if (cnt % 8 == 0) {
13003 13003 (void) sprintf(bp++, "\n");
13004 13004 }
13005 13005 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
13006 13006 bp += 9;
13007 13007 }
13008 13008
13009 13009 (void) sprintf(bp, "\n\nLMC Registers");
13010 13010 bp += strlen(bp);
13011 13011 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
13012 13012 if (cnt % 8 == 0) {
13013 13013 (void) sprintf(bp++, "\n");
13014 13014 }
13015 13015 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
13016 13016 bp += 9;
13017 13017 }
13018 13018
13019 13019 (void) sprintf(bp, "\n\nFPM Hardware Registers");
13020 13020 bp += strlen(bp);
13021 13021 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13022 13022 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
13023 13023 (uint32_t)(sizeof (fw->fpm_hdw_reg));
13024 13024 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13025 13025 if (cnt % 8 == 0) {
13026 13026 (void) sprintf(bp++, "\n");
13027 13027 }
13028 13028 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
13029 13029 bp += 9;
13030 13030 }
13031 13031
13032 13032 (void) sprintf(bp, "\n\nFB Hardware Registers");
13033 13033 bp += strlen(bp);
13034 13034 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
13035 13035 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
13036 13036 (uint32_t)(sizeof (fw->fb_hdw_reg));
13037 13037 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
13038 13038 if (cnt % 8 == 0) {
13039 13039 (void) sprintf(bp++, "\n");
13040 13040 }
13041 13041 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
13042 13042 bp += 9;
13043 13043 }
13044 13044
13045 13045 (void) sprintf(bp, "\n\nCode RAM");
13046 13046 bp += strlen(bp);
13047 13047 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
13048 13048 if (cnt % 8 == 0) {
13049 13049 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13050 13050 bp += 11;
13051 13051 }
13052 13052 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13053 13053 bp += 9;
13054 13054 }
13055 13055
13056 13056 (void) sprintf(bp, "\n\nExternal Memory");
13057 13057 bp += strlen(bp);
13058 13058 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13059 13059 if (cnt % 8 == 0) {
13060 13060 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13061 13061 bp += 11;
13062 13062 }
13063 13063 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13064 13064 bp += 9;
13065 13065 }
13066 13066
13067 13067 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13068 13068 bp += strlen(bp);
13069 13069
13070 13070 (void) sprintf(bp, "\n\nRequest Queue");
13071 13071 bp += strlen(bp);
13072 13072 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13073 13073 if (cnt % 8 == 0) {
13074 13074 (void) sprintf(bp, "\n%08x: ", cnt);
13075 13075 bp += strlen(bp);
13076 13076 }
13077 13077 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13078 13078 bp += strlen(bp);
13079 13079 }
13080 13080
13081 13081 (void) sprintf(bp, "\n\nResponse Queue");
13082 13082 bp += strlen(bp);
13083 13083 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13084 13084 if (cnt % 8 == 0) {
13085 13085 (void) sprintf(bp, "\n%08x: ", cnt);
13086 13086 bp += strlen(bp);
13087 13087 }
13088 13088 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13089 13089 bp += strlen(bp);
13090 13090 }
13091 13091
13092 13092 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13093 13093 (ha->fwexttracebuf.bp != NULL)) {
13094 13094 uint32_t cnt_b = 0;
13095 13095 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13096 13096
13097 13097 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13098 13098 bp += strlen(bp);
13099 13099 /* show data address as a byte address, data as long words */
13100 13100 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13101 13101 cnt_b = cnt * 4;
13102 13102 if (cnt_b % 32 == 0) {
13103 13103 (void) sprintf(bp, "\n%08x: ",
13104 13104 (int)(w64 + cnt_b));
13105 13105 bp += 11;
13106 13106 }
13107 13107 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13108 13108 bp += 9;
13109 13109 }
13110 13110 }
13111 13111
13112 13112 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13113 13113 (ha->fwfcetracebuf.bp != NULL)) {
13114 13114 uint32_t cnt_b = 0;
13115 13115 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13116 13116
13117 13117 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13118 13118 bp += strlen(bp);
13119 13119 /* show data address as a byte address, data as long words */
13120 13120 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13121 13121 cnt_b = cnt * 4;
13122 13122 if (cnt_b % 32 == 0) {
13123 13123 (void) sprintf(bp, "\n%08x: ",
13124 13124 (int)(w64 + cnt_b));
13125 13125 bp += 11;
13126 13126 }
13127 13127 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13128 13128 bp += 9;
13129 13129 }
13130 13130 }
13131 13131
13132 13132 (void) sprintf(bp, "\n\n");
13133 13133 bp += strlen(bp);
13134 13134
13135 13135 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13136 13136
13137 13137 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13138 13138
13139 13139 return (cnt);
13140 13140 }
13141 13141
13142 13142 /*
13143 13143 * ql_2200_binary_fw_dump
13144 13144 *
13145 13145 * Input:
13146 13146 * ha: adapter state pointer.
13147 13147 * fw: firmware dump context pointer.
13148 13148 *
13149 13149 * Returns:
13150 13150 * ql local function return status code.
13151 13151 *
13152 13152 * Context:
13153 13153 * Interrupt or Kernel context, no mailbox commands allowed.
13154 13154 */
13155 13155 static int
13156 13156 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13157 13157 {
13158 13158 uint32_t cnt;
13159 13159 uint16_t risc_address;
13160 13160 clock_t timer;
13161 13161 mbx_cmd_t mc;
13162 13162 mbx_cmd_t *mcp = &mc;
13163 13163 int rval = QL_SUCCESS;
13164 13164
13165 13165 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13166 13166
13167 13167 /* Disable ISP interrupts. */
13168 13168 WRT16_IO_REG(ha, ictrl, 0);
13169 13169 ADAPTER_STATE_LOCK(ha);
13170 13170 ha->flags &= ~INTERRUPTS_ENABLED;
13171 13171 ADAPTER_STATE_UNLOCK(ha);
13172 13172
13173 13173 /* Release mailbox registers. */
13174 13174 WRT16_IO_REG(ha, semaphore, 0);
13175 13175
13176 13176 /* Pause RISC. */
13177 13177 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13178 13178 timer = 30000;
13179 13179 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13180 13180 if (timer-- != 0) {
13181 13181 drv_usecwait(MILLISEC);
13182 13182 } else {
13183 13183 rval = QL_FUNCTION_TIMEOUT;
13184 13184 break;
13185 13185 }
13186 13186 }
13187 13187
13188 13188 if (rval == QL_SUCCESS) {
13189 13189 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13190 13190 sizeof (fw->pbiu_reg) / 2, 16);
13191 13191
13192 13192 /* In 2200 we only read 8 mailboxes */
13193 13193 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
13194 13194 8, 16);
13195 13195
13196 13196 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
13197 13197 sizeof (fw->dma_reg) / 2, 16);
13198 13198
13199 13199 WRT16_IO_REG(ha, ctrl_status, 0);
13200 13200 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13201 13201 sizeof (fw->risc_hdw_reg) / 2, 16);
13202 13202
13203 13203 WRT16_IO_REG(ha, pcr, 0x2000);
13204 13204 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13205 13205 sizeof (fw->risc_gp0_reg) / 2, 16);
13206 13206
13207 13207 WRT16_IO_REG(ha, pcr, 0x2100);
13208 13208 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13209 13209 sizeof (fw->risc_gp1_reg) / 2, 16);
13210 13210
13211 13211 WRT16_IO_REG(ha, pcr, 0x2200);
13212 13212 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13213 13213 sizeof (fw->risc_gp2_reg) / 2, 16);
13214 13214
13215 13215 WRT16_IO_REG(ha, pcr, 0x2300);
13216 13216 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13217 13217 sizeof (fw->risc_gp3_reg) / 2, 16);
13218 13218
13219 13219 WRT16_IO_REG(ha, pcr, 0x2400);
13220 13220 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13221 13221 sizeof (fw->risc_gp4_reg) / 2, 16);
13222 13222
13223 13223 WRT16_IO_REG(ha, pcr, 0x2500);
13224 13224 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13225 13225 sizeof (fw->risc_gp5_reg) / 2, 16);
13226 13226
13227 13227 WRT16_IO_REG(ha, pcr, 0x2600);
13228 13228 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13229 13229 sizeof (fw->risc_gp6_reg) / 2, 16);
13230 13230
13231 13231 WRT16_IO_REG(ha, pcr, 0x2700);
13232 13232 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13233 13233 sizeof (fw->risc_gp7_reg) / 2, 16);
13234 13234
13235 13235 WRT16_IO_REG(ha, ctrl_status, 0x10);
13236 13236 /* 2200 has only 16 registers */
13237 13237 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13238 13238 ha->iobase + 0x80, 16, 16);
13239 13239
13240 13240 WRT16_IO_REG(ha, ctrl_status, 0x20);
13241 13241 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13242 13242 sizeof (fw->fpm_b0_reg) / 2, 16);
13243 13243
13244 13244 WRT16_IO_REG(ha, ctrl_status, 0x30);
13245 13245 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13246 13246 sizeof (fw->fpm_b1_reg) / 2, 16);
13247 13247
13248 13248 /* Select FPM registers. */
13249 13249 WRT16_IO_REG(ha, ctrl_status, 0x20);
13250 13250
13251 13251 /* FPM Soft Reset. */
13252 13252 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13253 13253
13254 13254 /* Select frame buffer registers. */
13255 13255 WRT16_IO_REG(ha, ctrl_status, 0x10);
13256 13256
13257 13257 /* Reset frame buffer FIFOs. */
13258 13258 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13259 13259
13260 13260 /* Select RISC module registers. */
13261 13261 WRT16_IO_REG(ha, ctrl_status, 0);
13262 13262
13263 13263 /* Reset RISC module. */
13264 13264 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13265 13265
13266 13266 /* Reset ISP semaphore. */
13267 13267 WRT16_IO_REG(ha, semaphore, 0);
13268 13268
13269 13269 /* Release RISC module. */
13270 13270 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13271 13271
13272 13272 /* Wait for RISC to recover from reset. */
13273 13273 timer = 30000;
13274 13274 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13275 13275 if (timer-- != 0) {
13276 13276 drv_usecwait(MILLISEC);
13277 13277 } else {
13278 13278 rval = QL_FUNCTION_TIMEOUT;
13279 13279 break;
13280 13280 }
13281 13281 }
13282 13282
13283 13283 /* Disable RISC pause on FPM parity error. */
13284 13284 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13285 13285 }
13286 13286
13287 13287 if (rval == QL_SUCCESS) {
13288 13288 /* Pause RISC. */
13289 13289 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13290 13290 timer = 30000;
13291 13291 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13292 13292 if (timer-- != 0) {
13293 13293 drv_usecwait(MILLISEC);
13294 13294 } else {
13295 13295 rval = QL_FUNCTION_TIMEOUT;
13296 13296 break;
13297 13297 }
13298 13298 }
13299 13299 }
13300 13300
13301 13301 if (rval == QL_SUCCESS) {
13302 13302 /* Set memory configuration and timing. */
13303 13303 WRT16_IO_REG(ha, mctr, 0xf2);
13304 13304
13305 13305 /* Release RISC. */
13306 13306 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13307 13307
13308 13308 /* Get RISC SRAM. */
13309 13309 risc_address = 0x1000;
13310 13310 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
13311 13311 for (cnt = 0; cnt < 0xf000; cnt++) {
13312 13312 WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
13313 13313 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
13314 13314 for (timer = 6000000; timer != 0; timer--) {
13315 13315 /* Check for pending interrupts. */
13316 13316 if (INTERRUPT_PENDING(ha)) {
13317 13317 if (RD16_IO_REG(ha, semaphore) &
13318 13318 BIT_0) {
13319 13319 WRT16_IO_REG(ha, hccr,
13320 13320 HC_CLR_RISC_INT);
13321 13321 mcp->mb[0] = RD16_IO_REG(ha,
13322 13322 mailbox_out[0]);
13323 13323 fw->risc_ram[cnt] =
13324 13324 RD16_IO_REG(ha,
13325 13325 mailbox_out[2]);
13326 13326 WRT16_IO_REG(ha,
13327 13327 semaphore, 0);
13328 13328 break;
13329 13329 }
13330 13330 WRT16_IO_REG(ha, hccr,
13331 13331 HC_CLR_RISC_INT);
13332 13332 }
13333 13333 drv_usecwait(5);
13334 13334 }
13335 13335
13336 13336 if (timer == 0) {
13337 13337 rval = QL_FUNCTION_TIMEOUT;
13338 13338 } else {
13339 13339 rval = mcp->mb[0];
13340 13340 }
13341 13341
13342 13342 if (rval != QL_SUCCESS) {
13343 13343 break;
13344 13344 }
13345 13345 }
13346 13346 }
13347 13347
13348 13348 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13349 13349
13350 13350 return (rval);
13351 13351 }
13352 13352
13353 13353 /*
13354 13354 * ql_2300_binary_fw_dump
13355 13355 *
13356 13356 * Input:
13357 13357 * ha: adapter state pointer.
13358 13358 * fw: firmware dump context pointer.
13359 13359 *
13360 13360 * Returns:
13361 13361 * ql local function return status code.
13362 13362 *
13363 13363 * Context:
13364 13364 * Interrupt or Kernel context, no mailbox commands allowed.
13365 13365 */
13366 13366 static int
13367 13367 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13368 13368 {
13369 13369 clock_t timer;
13370 13370 int rval = QL_SUCCESS;
13371 13371
13372 13372 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13373 13373
13374 13374 /* Disable ISP interrupts. */
13375 13375 WRT16_IO_REG(ha, ictrl, 0);
13376 13376 ADAPTER_STATE_LOCK(ha);
13377 13377 ha->flags &= ~INTERRUPTS_ENABLED;
13378 13378 ADAPTER_STATE_UNLOCK(ha);
13379 13379
13380 13380 /* Release mailbox registers. */
13381 13381 WRT16_IO_REG(ha, semaphore, 0);
13382 13382
13383 13383 /* Pause RISC. */
13384 13384 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13385 13385 timer = 30000;
13386 13386 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13387 13387 if (timer-- != 0) {
13388 13388 drv_usecwait(MILLISEC);
13389 13389 } else {
13390 13390 rval = QL_FUNCTION_TIMEOUT;
13391 13391 break;
13392 13392 }
13393 13393 }
13394 13394
13395 13395 if (rval == QL_SUCCESS) {
13396 13396 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13397 13397 sizeof (fw->pbiu_reg) / 2, 16);
13398 13398
13399 13399 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13400 13400 sizeof (fw->risc_host_reg) / 2, 16);
13401 13401
13402 13402 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13403 13403 sizeof (fw->mailbox_reg) / 2, 16);
13404 13404
13405 13405 WRT16_IO_REG(ha, ctrl_status, 0x40);
13406 13406 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13407 13407 sizeof (fw->resp_dma_reg) / 2, 16);
13408 13408
13409 13409 WRT16_IO_REG(ha, ctrl_status, 0x50);
13410 13410 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13411 13411 sizeof (fw->dma_reg) / 2, 16);
13412 13412
13413 13413 WRT16_IO_REG(ha, ctrl_status, 0);
13414 13414 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13415 13415 sizeof (fw->risc_hdw_reg) / 2, 16);
13416 13416
13417 13417 WRT16_IO_REG(ha, pcr, 0x2000);
13418 13418 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13419 13419 sizeof (fw->risc_gp0_reg) / 2, 16);
13420 13420
13421 13421 WRT16_IO_REG(ha, pcr, 0x2200);
13422 13422 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13423 13423 sizeof (fw->risc_gp1_reg) / 2, 16);
13424 13424
13425 13425 WRT16_IO_REG(ha, pcr, 0x2400);
13426 13426 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13427 13427 sizeof (fw->risc_gp2_reg) / 2, 16);
13428 13428
13429 13429 WRT16_IO_REG(ha, pcr, 0x2600);
13430 13430 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13431 13431 sizeof (fw->risc_gp3_reg) / 2, 16);
13432 13432
13433 13433 WRT16_IO_REG(ha, pcr, 0x2800);
13434 13434 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13435 13435 sizeof (fw->risc_gp4_reg) / 2, 16);
13436 13436
13437 13437 WRT16_IO_REG(ha, pcr, 0x2A00);
13438 13438 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13439 13439 sizeof (fw->risc_gp5_reg) / 2, 16);
13440 13440
13441 13441 WRT16_IO_REG(ha, pcr, 0x2C00);
13442 13442 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13443 13443 sizeof (fw->risc_gp6_reg) / 2, 16);
13444 13444
13445 13445 WRT16_IO_REG(ha, pcr, 0x2E00);
13446 13446 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13447 13447 sizeof (fw->risc_gp7_reg) / 2, 16);
13448 13448
13449 13449 WRT16_IO_REG(ha, ctrl_status, 0x10);
13450 13450 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13451 13451 ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13452 13452
13453 13453 WRT16_IO_REG(ha, ctrl_status, 0x20);
13454 13454 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13455 13455 sizeof (fw->fpm_b0_reg) / 2, 16);
13456 13456
13457 13457 WRT16_IO_REG(ha, ctrl_status, 0x30);
13458 13458 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13459 13459 sizeof (fw->fpm_b1_reg) / 2, 16);
13460 13460
13461 13461 /* Select FPM registers. */
13462 13462 WRT16_IO_REG(ha, ctrl_status, 0x20);
13463 13463
13464 13464 /* FPM Soft Reset. */
13465 13465 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13466 13466
13467 13467 /* Select frame buffer registers. */
13468 13468 WRT16_IO_REG(ha, ctrl_status, 0x10);
13469 13469
13470 13470 /* Reset frame buffer FIFOs. */
13471 13471 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13472 13472
13473 13473 /* Select RISC module registers. */
13474 13474 WRT16_IO_REG(ha, ctrl_status, 0);
13475 13475
13476 13476 /* Reset RISC module. */
13477 13477 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13478 13478
13479 13479 /* Reset ISP semaphore. */
13480 13480 WRT16_IO_REG(ha, semaphore, 0);
13481 13481
13482 13482 /* Release RISC module. */
13483 13483 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13484 13484
13485 13485 /* Wait for RISC to recover from reset. */
13486 13486 timer = 30000;
13487 13487 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13488 13488 if (timer-- != 0) {
13489 13489 drv_usecwait(MILLISEC);
13490 13490 } else {
13491 13491 rval = QL_FUNCTION_TIMEOUT;
13492 13492 break;
13493 13493 }
13494 13494 }
13495 13495
13496 13496 /* Disable RISC pause on FPM parity error. */
13497 13497 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13498 13498 }
13499 13499
13500 13500 /* Get RISC SRAM. */
13501 13501 if (rval == QL_SUCCESS) {
13502 13502 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13503 13503 }
13504 13504 /* Get STACK SRAM. */
13505 13505 if (rval == QL_SUCCESS) {
13506 13506 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13507 13507 }
13508 13508 /* Get DATA SRAM. */
13509 13509 if (rval == QL_SUCCESS) {
13510 13510 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13511 13511 }
13512 13512
13513 13513 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13514 13514
13515 13515 return (rval);
13516 13516 }
13517 13517
/*
 * ql_24xx_binary_fw_dump
 *	Captures an ISP24xx firmware/hardware state dump into *fw: pauses
 *	the RISC, snapshots host-interface, shadow, mailbox, sequencer,
 *	DMA-engine, RISC, LMC, FPM and frame-buffer register windows, copies
 *	the request/response rings, resets the chip, then reads code RAM,
 *	external memory and (when enabled) the firmware trace buffers.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	fw:	firmware dump context pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Record HCCR before we start poking the chip. */
	fw->hccr = RD32_IO_REG(ha, hccr);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		/* Request pause, then poll up to 30000 x 100us (~3s). */
		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
			} else {
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {
		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */
		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);	/* flush the write */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers: for each entry, write a selector value
		 * at iobase+0xF0 and read the selected datum at iobase+0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		/* Mailbox registers. */
		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/*
		 * Transfer sequence registers.  Each group below selects a
		 * register window via io_base_addr, then reads the window
		 * contents at iobase+0xC0; ql_read_regs returns the buffer
		 * position following what it just stored (bp).
		 */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_0_reg) / 4, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->xseq_1_reg) / 4, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_0_reg) / 4, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/* Queues. */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/* FPM hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/*
		 * Copy ring contents; LITTLE_ENDIAN_32 presumably
		 * normalizes each word to little-endian on big-endian
		 * hosts (macro defined elsewhere) — verify if touched.
		 */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */
	ql_reset_chip(ha);

	/* Memory. */
	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/* Get the extended trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	/* Get the FC event trace buffer */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
13954 13954
13955 13955 /*
13956 13956 * ql_25xx_binary_fw_dump
13957 13957 *
13958 13958 * Input:
13959 13959 * ha: adapter state pointer.
13960 13960 * fw: firmware dump context pointer.
13961 13961 *
13962 13962 * Returns:
13963 13963 * ql local function return status code.
13964 13964 *
13965 13965 * Context:
13966 13966 * Interrupt or Kernel context, no mailbox commands allowed.
13967 13967 */
13968 13968 static int
13969 13969 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13970 13970 {
13971 13971 uint32_t *reg32;
13972 13972 void *bp;
13973 13973 clock_t timer;
13974 13974 int rval = QL_SUCCESS;
13975 13975
13976 13976 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13977 13977
13978 13978 fw->r2h_status = RD32_IO_REG(ha, risc2host);
13979 13979
13980 13980 /* Pause RISC. */
13981 13981 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13982 13982 /* Disable ISP interrupts. */
13983 13983 WRT16_IO_REG(ha, ictrl, 0);
13984 13984
13985 13985 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13986 13986 for (timer = 30000;
13987 13987 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13988 13988 rval == QL_SUCCESS; timer--) {
13989 13989 if (timer) {
13990 13990 drv_usecwait(100);
13991 13991 if (timer % 10000 == 0) {
13992 13992 EL(ha, "risc pause %d\n", timer);
13993 13993 }
13994 13994 } else {
13995 13995 EL(ha, "risc pause timeout\n");
13996 13996 rval = QL_FUNCTION_TIMEOUT;
13997 13997 }
13998 13998 }
13999 13999 }
14000 14000
14001 14001 if (rval == QL_SUCCESS) {
14002 14002
14003 14003 /* Host Interface registers */
14004 14004
14005 14005 /* HostRisc registers. */
14006 14006 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14007 14007 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14008 14008 16, 32);
14009 14009 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14010 14010 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14011 14011
14012 14012 /* PCIe registers. */
14013 14013 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14014 14014 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14015 14015 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14016 14016 3, 32);
14017 14017 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14018 14018 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14019 14019
14020 14020 /* Host interface registers. */
14021 14021 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14022 14022 sizeof (fw->host_reg) / 4, 32);
14023 14023
14024 14024 /* Disable ISP interrupts. */
14025 14025
14026 14026 WRT32_IO_REG(ha, ictrl, 0);
14027 14027 RD32_IO_REG(ha, ictrl);
14028 14028 ADAPTER_STATE_LOCK(ha);
14029 14029 ha->flags &= ~INTERRUPTS_ENABLED;
14030 14030 ADAPTER_STATE_UNLOCK(ha);
14031 14031
14032 14032 /* Shadow registers. */
14033 14033
14034 14034 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14035 14035 RD32_IO_REG(ha, io_base_addr);
14036 14036
14037 14037 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14038 14038 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14039 14039 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14040 14040 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14041 14041
14042 14042 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14043 14043 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14044 14044 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14045 14045 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14046 14046
14047 14047 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14048 14048 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14049 14049 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14050 14050 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14051 14051
14052 14052 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14053 14053 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14054 14054 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14055 14055 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14056 14056
14057 14057 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14058 14058 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14059 14059 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14060 14060 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14061 14061
14062 14062 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14063 14063 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14064 14064 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14065 14065 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14066 14066
14067 14067 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14068 14068 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14069 14069 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14070 14070 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14071 14071
14072 14072 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14073 14073 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14074 14074 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14075 14075 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14076 14076
14077 14077 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14078 14078 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14079 14079 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14080 14080 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14081 14081
14082 14082 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14083 14083 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14084 14084 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14085 14085 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14086 14086
14087 14087 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14088 14088 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14089 14089 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14090 14090 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14091 14091
14092 14092 /* RISC I/O register. */
14093 14093
14094 14094 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14095 14095 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14096 14096 1, 32);
14097 14097
14098 14098 /* Mailbox registers. */
14099 14099
14100 14100 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14101 14101 sizeof (fw->mailbox_reg) / 2, 16);
14102 14102
14103 14103 /* Transfer sequence registers. */
14104 14104
14105 14105 /* XSEQ GP */
14106 14106 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14107 14107 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14108 14108 16, 32);
14109 14109 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14110 14110 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14111 14111 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14112 14112 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14113 14113 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14114 14114 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14115 14115 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14116 14116 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117 14117 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14118 14118 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14119 14119 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14120 14120 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14121 14121 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14122 14122 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14123 14123
14124 14124 /* XSEQ-0 */
14125 14125 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14126 14126 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14127 14127 16, 32);
14128 14128 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14129 14129 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14130 14130 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14131 14131 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14132 14132
14133 14133 /* XSEQ-1 */
14134 14134 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14135 14135 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14136 14136 16, 32);
14137 14137
14138 14138 /* Receive sequence registers. */
14139 14139
14140 14140 /* RSEQ GP */
14141 14141 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14142 14142 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14143 14143 16, 32);
14144 14144 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14145 14145 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14146 14146 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14147 14147 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14148 14148 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14149 14149 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14150 14150 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14151 14151 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14152 14152 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14153 14153 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14154 14154 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14155 14155 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14156 14156 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14157 14157 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14158 14158
14159 14159 /* RSEQ-0 */
14160 14160 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14161 14161 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14162 14162 16, 32);
14163 14163 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14164 14164 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14165 14165
14166 14166 /* RSEQ-1 */
14167 14167 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14168 14168 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14169 14169 sizeof (fw->rseq_1_reg) / 4, 32);
14170 14170
14171 14171 /* RSEQ-2 */
14172 14172 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14173 14173 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14174 14174 sizeof (fw->rseq_2_reg) / 4, 32);
14175 14175
14176 14176 /* Auxiliary sequencer registers. */
14177 14177
14178 14178 /* ASEQ GP */
14179 14179 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14180 14180 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14181 14181 16, 32);
14182 14182 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14183 14183 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14184 14184 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14185 14185 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14186 14186 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14187 14187 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14188 14188 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14189 14189 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14190 14190 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14191 14191 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14192 14192 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14193 14193 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14194 14194 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14195 14195 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14196 14196
14197 14197 /* ASEQ-0 */
14198 14198 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14199 14199 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14200 14200 16, 32);
14201 14201 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14202 14202 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14203 14203
14204 14204 /* ASEQ-1 */
14205 14205 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14206 14206 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14207 14207 16, 32);
14208 14208
14209 14209 /* ASEQ-2 */
14210 14210 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14211 14211 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14212 14212 16, 32);
14213 14213
14214 14214 /* Command DMA registers. */
14215 14215
14216 14216 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14217 14217 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14218 14218 sizeof (fw->cmd_dma_reg) / 4, 32);
14219 14219
14220 14220 /* Queues. */
14221 14221
14222 14222 /* RequestQ0 */
14223 14223 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14224 14224 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14225 14225 8, 32);
14226 14226 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14227 14227
14228 14228 /* ResponseQ0 */
14229 14229 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14230 14230 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14231 14231 8, 32);
14232 14232 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14233 14233
14234 14234 /* RequestQ1 */
14235 14235 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14236 14236 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14237 14237 8, 32);
14238 14238 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14239 14239
14240 14240 /* Transmit DMA registers. */
14241 14241
14242 14242 /* XMT0 */
14243 14243 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14244 14244 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14245 14245 16, 32);
14246 14246 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14247 14247 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14248 14248
14249 14249 /* XMT1 */
14250 14250 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14251 14251 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14252 14252 16, 32);
14253 14253 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14254 14254 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14255 14255
14256 14256 /* XMT2 */
14257 14257 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14258 14258 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14259 14259 16, 32);
14260 14260 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14261 14261 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14262 14262
14263 14263 /* XMT3 */
14264 14264 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14265 14265 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14266 14266 16, 32);
14267 14267 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14268 14268 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14269 14269
14270 14270 /* XMT4 */
14271 14271 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14272 14272 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14273 14273 16, 32);
14274 14274 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14275 14275 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14276 14276
14277 14277 /* XMT Common */
14278 14278 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14279 14279 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14280 14280 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14281 14281
14282 14282 /* Receive DMA registers. */
14283 14283
14284 14284 /* RCVThread0 */
14285 14285 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14286 14286 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14287 14287 ha->iobase + 0xC0, 16, 32);
14288 14288 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14289 14289 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14290 14290
14291 14291 /* RCVThread1 */
14292 14292 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14293 14293 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14294 14294 ha->iobase + 0xC0, 16, 32);
14295 14295 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14296 14296 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14297 14297
14298 14298 /* RISC registers. */
14299 14299
14300 14300 /* RISC GP */
14301 14301 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14302 14302 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14303 14303 16, 32);
14304 14304 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14305 14305 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14306 14306 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14307 14307 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14308 14308 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14309 14309 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14310 14310 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14311 14311 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14312 14312 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14313 14313 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14314 14314 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14315 14315 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14316 14316 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14317 14317 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14318 14318
14319 14319 /* Local memory controller (LMC) registers. */
14320 14320
14321 14321 /* LMC */
14322 14322 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14323 14323 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14324 14324 16, 32);
14325 14325 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14326 14326 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14327 14327 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14328 14328 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14329 14329 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14330 14330 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14331 14331 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14332 14332 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14333 14333 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14334 14334 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14335 14335 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14336 14336 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14337 14337 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14338 14338 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14339 14339
14340 14340 /* Fibre Protocol Module registers. */
14341 14341
14342 14342 /* FPM hardware */
14343 14343 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14344 14344 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14345 14345 16, 32);
14346 14346 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14347 14347 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14348 14348 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14349 14349 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14350 14350 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14351 14351 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14352 14352 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14353 14353 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14354 14354 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14355 14355 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14356 14356 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14357 14357 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14358 14358 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14359 14359 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14360 14360 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14361 14361 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14362 14362 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14363 14363 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14364 14364 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14365 14365 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14366 14366 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14367 14367 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14368 14368
14369 14369 /* Frame Buffer registers. */
14370 14370
14371 14371 /* FB hardware */
14372 14372 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14373 14373 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14374 14374 16, 32);
14375 14375 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14376 14376 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14377 14377 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14378 14378 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14379 14379 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14380 14380 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14381 14381 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14382 14382 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14383 14383 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14384 14384 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14385 14385 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14386 14386 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14387 14387 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14388 14388 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14389 14389 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14390 14390 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14391 14391 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14392 14392 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14393 14393 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14394 14394 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14395 14395 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14396 14396 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14397 14397 }
14398 14398
14399 14399 /* Get the request queue */
14400 14400 if (rval == QL_SUCCESS) {
14401 14401 uint32_t cnt;
14402 14402 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14403 14403
14404 14404 /* Sync DMA buffer. */
14405 14405 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14406 14406 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14407 14407 DDI_DMA_SYNC_FORKERNEL);
14408 14408
14409 14409 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14410 14410 fw->req_q[cnt] = *w32++;
14411 14411 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14412 14412 }
14413 14413 }
14414 14414
14415 14415 	/* Get the response queue */
14416 14416 if (rval == QL_SUCCESS) {
14417 14417 uint32_t cnt;
14418 14418 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14419 14419
14420 14420 /* Sync DMA buffer. */
14421 14421 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14422 14422 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14423 14423 DDI_DMA_SYNC_FORKERNEL);
14424 14424
14425 14425 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14426 14426 fw->rsp_q[cnt] = *w32++;
14427 14427 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14428 14428 }
14429 14429 }
14430 14430
14431 14431 /* Reset RISC. */
14432 14432
14433 14433 ql_reset_chip(ha);
14434 14434
14435 14435 /* Memory. */
14436 14436
14437 14437 if (rval == QL_SUCCESS) {
14438 14438 /* Code RAM. */
14439 14439 rval = ql_read_risc_ram(ha, 0x20000,
14440 14440 sizeof (fw->code_ram) / 4, fw->code_ram);
14441 14441 }
14442 14442 if (rval == QL_SUCCESS) {
14443 14443 /* External Memory. */
14444 14444 rval = ql_read_risc_ram(ha, 0x100000,
14445 14445 ha->fw_ext_memory_size / 4, fw->ext_mem);
14446 14446 }
14447 14447
14448 14448 /* Get the FC event trace buffer */
14449 14449 if (rval == QL_SUCCESS) {
14450 14450 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14451 14451 (ha->fwfcetracebuf.bp != NULL)) {
14452 14452 uint32_t cnt;
14453 14453 uint32_t *w32 = ha->fwfcetracebuf.bp;
14454 14454
14455 14455 /* Sync DMA buffer. */
14456 14456 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14457 14457 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14458 14458
14459 14459 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14460 14460 fw->fce_trace_buf[cnt] = *w32++;
14461 14461 }
14462 14462 }
14463 14463 }
14464 14464
14465 14465 /* Get the extended trace buffer */
14466 14466 if (rval == QL_SUCCESS) {
14467 14467 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14468 14468 (ha->fwexttracebuf.bp != NULL)) {
14469 14469 uint32_t cnt;
14470 14470 uint32_t *w32 = ha->fwexttracebuf.bp;
14471 14471
14472 14472 /* Sync DMA buffer. */
14473 14473 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14474 14474 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14475 14475
14476 14476 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14477 14477 fw->ext_trace_buf[cnt] = *w32++;
14478 14478 }
14479 14479 }
14480 14480 }
14481 14481
14482 14482 if (rval != QL_SUCCESS) {
14483 14483 EL(ha, "failed=%xh\n", rval);
14484 14484 } else {
14485 14485 /*EMPTY*/
14486 14486 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14487 14487 }
14488 14488
14489 14489 return (rval);
14490 14490 }
14491 14491
14492 14492 /*
14493 14493 * ql_81xx_binary_fw_dump
14494 14494 *
14495 14495 * Input:
14496 14496 * ha: adapter state pointer.
14497 14497 * fw: firmware dump context pointer.
14498 14498 *
14499 14499 * Returns:
14500 14500 * ql local function return status code.
14501 14501 *
14502 14502 * Context:
14503 14503 * Interrupt or Kernel context, no mailbox commands allowed.
14504 14504 */
static int
ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
{
	uint32_t	*reg32;
	void		*bp;
	clock_t		timer;
	int		rval = QL_SUCCESS;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Snapshot the risc-to-host status before disturbing the chip. */
	fw->r2h_status = RD32_IO_REG(ha, risc2host);

	/* Pause RISC. */
	if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);

		WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
		/*
		 * Poll for the pause to take effect: up to 30000
		 * iterations of 100us each (~3s total), logging progress
		 * every 10000 iterations (~1s).  The final iteration
		 * (timer == 0) records the timeout instead of waiting.
		 */
		for (timer = 30000;
		    (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
		    rval == QL_SUCCESS; timer--) {
			if (timer) {
				drv_usecwait(100);
				if (timer % 10000 == 0) {
					EL(ha, "risc pause %d\n", timer);
				}
			} else {
				EL(ha, "risc pause timeout\n");
				rval = QL_FUNCTION_TIMEOUT;
			}
		}
	}

	if (rval == QL_SUCCESS) {

		/*
		 * The register groups below are read through a windowed
		 * interface: a write to io_base_addr selects a bank whose
		 * registers then appear at iobase + 0xC0.  ql_read_regs()
		 * returns the advanced destination pointer, so successive
		 * window reads concatenate into a single dump buffer.
		 */

		/* Host Interface registers */

		/* HostRisc registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7000);
		bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* PCIe registers. */
		WRT32_IO_REG(ha, io_base_addr, 0x7c00);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
		bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
		    3, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
		WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);

		/* Host interface registers. */
		(void) ql_read_regs(ha, fw->host_reg, ha->iobase,
		    sizeof (fw->host_reg) / 4, 32);

		/* Disable ISP interrupts. */

		WRT32_IO_REG(ha, ictrl, 0);
		RD32_IO_REG(ha, ictrl);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Shadow registers: each of the 11 entries is selected by
		 * writing 0xB0n00000 to iobase + 0xF0 and read back from
		 * iobase + 0xFC.
		 */

		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		RD32_IO_REG(ha, io_base_addr);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0000000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0100000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0200000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0300000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0400000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0500000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0600000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0700000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0800000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0900000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);

		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
		WRT_REG_DWORD(ha, reg32, 0xB0A00000);
		reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
		fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);

		/* RISC I/O register. */

		WRT32_IO_REG(ha, io_base_addr, 0x0010);
		(void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
		    1, 32);

		/* Mailbox registers (16-bit, at iobase + 0x80). */

		(void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
		    sizeof (fw->mailbox_reg) / 2, 16);

		/* Transfer sequence registers. */

		/* XSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xBF00);
		bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
		bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
		(void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Receive sequence registers. */

		/* RSEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xFF00);
		bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFF70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
		bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RSEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
		(void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_1_reg) / 4, 32);

		/* RSEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
		(void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
		    sizeof (fw->rseq_2_reg) / 4, 32);

		/* Auxiliary sequencer registers. */

		/* ASEQ GP */
		WRT32_IO_REG(ha, io_base_addr, 0xB000);
		bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-0 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
		bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* ASEQ-1 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
		(void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
		    16, 32);

		/* ASEQ-2 */
		WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
		(void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
		    16, 32);

		/* Command DMA registers. */

		WRT32_IO_REG(ha, io_base_addr, 0x7100);
		(void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
		    sizeof (fw->cmd_dma_reg) / 4, 32);

		/*
		 * Queues: each queue dumps 8 registers from the 0xC0
		 * window plus 7 more from iobase + 0xE4.
		 */

		/* RequestQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7200);
		bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* ResponseQ0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7300);
		bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* RequestQ1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7400);
		bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
		    8, 32);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);

		/* Transmit DMA registers. */

		/* XMT0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7600);
		bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7610);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7620);
		bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7630);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT2 */
		WRT32_IO_REG(ha, io_base_addr, 0x7640);
		bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7650);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT3 */
		WRT32_IO_REG(ha, io_base_addr, 0x7660);
		bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7670);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT4 */
		WRT32_IO_REG(ha, io_base_addr, 0x7680);
		bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7690);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* XMT Common */
		WRT32_IO_REG(ha, io_base_addr, 0x76A0);
		(void) ql_read_regs(ha, fw->xmt_data_dma_reg,
		    ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);

		/* Receive DMA registers. */

		/* RCVThread0 */
		WRT32_IO_REG(ha, io_base_addr, 0x7700);
		bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7710);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RCVThread1 */
		WRT32_IO_REG(ha, io_base_addr, 0x7720);
		bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
		    ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x7730);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* RISC registers. */

		/* RISC GP */
		WRT32_IO_REG(ha, io_base_addr, 0x0F00);
		bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F10);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F20);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F30);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F40);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F50);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F60);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x0F70);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Local memory controller (LMC) registers. */

		/* LMC */
		WRT32_IO_REG(ha, io_base_addr, 0x3000);
		bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x3070);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Fibre Protocol Module registers. */

		/*
		 * FPM hardware: 14 windows here (0x4000-0x40D0); the
		 * 81xx dump includes two more windows (0x40C0, 0x40D0)
		 * than the 25xx variant above.
		 */
		WRT32_IO_REG(ha, io_base_addr, 0x4000);
		bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4050);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4060);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4070);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4080);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x4090);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40A0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x40D0);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);

		/* Frame Buffer registers. */

		/* FB hardware */
		WRT32_IO_REG(ha, io_base_addr, 0x6000);
		bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
		    16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6010);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6020);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6030);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6040);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6100);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6130);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6150);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6170);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6190);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61B0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x61C0);
		bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
		WRT32_IO_REG(ha, io_base_addr, 0x6F00);
		(void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
	}

	/* Get the request queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->request_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy word-by-word, storing each as little-endian. */
		for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
			fw->req_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->req_q[cnt]);
		}
	}

	/* Get the response queue */
	if (rval == QL_SUCCESS) {
		uint32_t	cnt;
		uint32_t	*w32 = (uint32_t *)ha->response_ring_bp;

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
		    DDI_DMA_SYNC_FORKERNEL);

		/* Copy word-by-word, storing each as little-endian. */
		for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
			fw->rsp_q[cnt] = *w32++;
			LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
		}
	}

	/* Reset RISC. */

	ql_reset_chip(ha);

	/* Memory (read back via mailbox-free RISC RAM access). */

	if (rval == QL_SUCCESS) {
		/* Code RAM. */
		rval = ql_read_risc_ram(ha, 0x20000,
		    sizeof (fw->code_ram) / 4, fw->code_ram);
	}
	if (rval == QL_SUCCESS) {
		/* External Memory. */
		rval = ql_read_risc_ram(ha, 0x100000,
		    ha->fw_ext_memory_size / 4, fw->ext_mem);
	}

	/*
	 * Get the FC event trace buffer; only present when FCE tracing
	 * is configured and its DMA buffer was allocated.
	 */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
		    (ha->fwfcetracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwfcetracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
			    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
				fw->fce_trace_buf[cnt] = *w32++;
			}
		}
	}

	/*
	 * Get the extended trace buffer; only present when extended
	 * tracing is configured and its DMA buffer was allocated.
	 */
	if (rval == QL_SUCCESS) {
		if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
		    (ha->fwexttracebuf.bp != NULL)) {
			uint32_t	cnt;
			uint32_t	*w32 = ha->fwexttracebuf.bp;

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
			    FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);

			for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
				fw->ext_trace_buf[cnt] = *w32++;
			}
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
15034 15034
/*
 * ql_read_risc_ram
 *	Reads RISC RAM one word at a time.
 *	Risc interrupts must be disabled when this routine is called.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	risc_address:	RISC code start address.
 *	len:		Number of words.
 *	buf:		buffer pointer; treated as an array of 32-bit words
 *			on 24xx-class chips, 16-bit words otherwise.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/* One MBC_READ_RAM_EXTENDED mailbox command per RISC word. */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the host-interrupt doorbell; mechanism is per family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll for completion: 6,000,000 * 5us = up to 30 seconds. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				if ((stat == 1) || (stat == 0x10)) {
					/* Command complete; latch the data. */
					if (CFG_IST(ha, CFG_CTRL_24258081)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Command error; mbox0 has status. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/* Unrelated interrupt: clear, keep polling. */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Acknowledge the RISC interrupt for this word. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			/* Poll loop expired without any completion status. */
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15128 15128
15129 15129 /*
15130 15130 * ql_read_regs
15131 15131 * Reads adapter registers to buffer.
15132 15132 *
15133 15133 * Input:
15134 15134 * ha: adapter state pointer.
15135 15135 * buf: buffer pointer.
15136 15136 * reg: start address.
15137 15137 * count: number of registers.
15138 15138 * wds: register size.
15139 15139 *
15140 15140 * Context:
15141 15141 * Interrupt or Kernel context, no mailbox commands allowed.
15142 15142 */
15143 15143 static void *
15144 15144 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15145 15145 uint8_t wds)
15146 15146 {
15147 15147 uint32_t *bp32, *reg32;
15148 15148 uint16_t *bp16, *reg16;
15149 15149 uint8_t *bp8, *reg8;
15150 15150
15151 15151 switch (wds) {
15152 15152 case 32:
15153 15153 bp32 = buf;
15154 15154 reg32 = reg;
15155 15155 while (count--) {
15156 15156 *bp32++ = RD_REG_DWORD(ha, reg32++);
15157 15157 }
15158 15158 return (bp32);
15159 15159 case 16:
15160 15160 bp16 = buf;
15161 15161 reg16 = reg;
15162 15162 while (count--) {
15163 15163 *bp16++ = RD_REG_WORD(ha, reg16++);
15164 15164 }
15165 15165 return (bp16);
15166 15166 case 8:
15167 15167 bp8 = buf;
15168 15168 reg8 = reg;
15169 15169 while (count--) {
15170 15170 *bp8++ = RD_REG_BYTE(ha, reg8++);
15171 15171 }
15172 15172 return (bp8);
15173 15173 default:
15174 15174 EL(ha, "Unknown word size=%d\n", wds);
15175 15175 return (buf);
15176 15176 }
15177 15177 }
15178 15178
15179 15179 static int
15180 15180 ql_save_config_regs(dev_info_t *dip)
15181 15181 {
15182 15182 ql_adapter_state_t *ha;
15183 15183 int ret;
15184 15184 ql_config_space_t chs;
15185 15185 caddr_t prop = "ql-config-space";
15186 15186
15187 15187 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15188 15188 if (ha == NULL) {
15189 15189 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15190 15190 ddi_get_instance(dip));
15191 15191 return (DDI_FAILURE);
15192 15192 }
15193 15193
15194 15194 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15195 15195
15196 15196 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15197 15197 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15198 15198 1) {
15199 15199 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15200 15200 return (DDI_SUCCESS);
15201 15201 }
15202 15202
15203 15203 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15204 15204 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15205 15205 PCI_CONF_HEADER);
15206 15206 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15207 15207 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15208 15208 PCI_BCNF_BCNTRL);
15209 15209 }
15210 15210
15211 15211 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15212 15212 PCI_CONF_CACHE_LINESZ);
15213 15213
15214 15214 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15215 15215 PCI_CONF_LATENCY_TIMER);
15216 15216
15217 15217 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15218 15218 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15219 15219 PCI_BCNF_LATENCY_TIMER);
15220 15220 }
15221 15221
15222 15222 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15223 15223 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15224 15224 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15225 15225 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15226 15226 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15227 15227 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15228 15228
15229 15229 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15230 15230 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15231 15231 (uchar_t *)&chs, sizeof (ql_config_space_t));
15232 15232
15233 15233 if (ret != DDI_PROP_SUCCESS) {
15234 15234 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15235 15235 QL_NAME, ddi_get_instance(dip), prop);
15236 15236 return (DDI_FAILURE);
15237 15237 }
15238 15238
15239 15239 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15240 15240
15241 15241 return (DDI_SUCCESS);
15242 15242 }
15243 15243
/*
 * ql_restore_config_regs
 *	Restores the adapter's PCI configuration registers from the
 *	"ql-config-space" devinfo property saved by ql_save_config_regs(),
 *	then removes the property.
 *
 * Input:
 *	dip:	dev_info pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing saved -> nothing to restore. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	/* Bridge-only registers exist for header type one devices. */
	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Base address registers. */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* Saved state has been consumed; drop the property. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15306 15306
15307 15307 uint8_t
15308 15308 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15309 15309 {
15310 15310 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15311 15311 return (ddi_get8(ha->sbus_config_handle,
15312 15312 (uint8_t *)(ha->sbus_config_base + off)));
15313 15313 }
15314 15314
15315 15315 #ifdef KERNEL_32
15316 15316 return (pci_config_getb(ha->pci_handle, off));
15317 15317 #else
15318 15318 return (pci_config_get8(ha->pci_handle, off));
15319 15319 #endif
15320 15320 }
15321 15321
15322 15322 uint16_t
15323 15323 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15324 15324 {
15325 15325 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15326 15326 return (ddi_get16(ha->sbus_config_handle,
15327 15327 (uint16_t *)(ha->sbus_config_base + off)));
15328 15328 }
15329 15329
15330 15330 #ifdef KERNEL_32
15331 15331 return (pci_config_getw(ha->pci_handle, off));
15332 15332 #else
15333 15333 return (pci_config_get16(ha->pci_handle, off));
15334 15334 #endif
15335 15335 }
15336 15336
15337 15337 uint32_t
15338 15338 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15339 15339 {
15340 15340 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15341 15341 return (ddi_get32(ha->sbus_config_handle,
15342 15342 (uint32_t *)(ha->sbus_config_base + off)));
15343 15343 }
15344 15344
15345 15345 #ifdef KERNEL_32
15346 15346 return (pci_config_getl(ha->pci_handle, off));
15347 15347 #else
15348 15348 return (pci_config_get32(ha->pci_handle, off));
15349 15349 #endif
15350 15350 }
15351 15351
15352 15352 void
15353 15353 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15354 15354 {
15355 15355 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15356 15356 ddi_put8(ha->sbus_config_handle,
15357 15357 (uint8_t *)(ha->sbus_config_base + off), val);
15358 15358 } else {
15359 15359 #ifdef KERNEL_32
15360 15360 pci_config_putb(ha->pci_handle, off, val);
15361 15361 #else
15362 15362 pci_config_put8(ha->pci_handle, off, val);
15363 15363 #endif
15364 15364 }
15365 15365 }
15366 15366
15367 15367 void
15368 15368 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15369 15369 {
15370 15370 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15371 15371 ddi_put16(ha->sbus_config_handle,
15372 15372 (uint16_t *)(ha->sbus_config_base + off), val);
15373 15373 } else {
15374 15374 #ifdef KERNEL_32
15375 15375 pci_config_putw(ha->pci_handle, off, val);
15376 15376 #else
15377 15377 pci_config_put16(ha->pci_handle, off, val);
15378 15378 #endif
15379 15379 }
15380 15380 }
15381 15381
15382 15382 void
15383 15383 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15384 15384 {
15385 15385 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15386 15386 ddi_put32(ha->sbus_config_handle,
15387 15387 (uint32_t *)(ha->sbus_config_base + off), val);
15388 15388 } else {
15389 15389 #ifdef KERNEL_32
15390 15390 pci_config_putl(ha->pci_handle, off, val);
15391 15391 #else
15392 15392 pci_config_put32(ha->pci_handle, off, val);
15393 15393 #endif
15394 15394 }
15395 15395 }
15396 15396
/*
 * ql_halt
 *	Waits for commands that are running to finish and
 *	if they do not, commands are aborted.
 *	Finally the adapter is reset.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pwr:	power state; the chip is reset only for PM_LEVEL_D3.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/*
			 * Wait for 30 seconds for commands to finish
			 * (3000 iterations * 10ms delay).
			 */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/* Abort commands that did not finish. */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					/*
					 * Keep the IOCB queue draining;
					 * restart the scan when new work
					 * was pending.
					 */
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	/* Full power-down: mark offline and reset the chip. */
	if (pwr == PM_LEVEL_D3) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15494 15494
/*
 * ql_get_dma_mem
 *	Function used to allocate dma memory.
 *
 * Input:
 *	ha:			adapter state pointer.
 *	mem:			pointer to dma memory object.
 *	size:			size of the request in bytes.
 *	allocation_type:	memory allocation type (KERNEL_MEM,
 *				BIG/LITTLE_ENDIAN_DMA, NO_SWAP_DMA).
 *	alignment:		QL_DMA_DATA_ALIGN or QL_DMA_RING_ALIGN.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
    mem_alloc_type_t allocation_type, mem_alignment_t alignment)
{
	int	rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	mem->size = size;
	mem->type = allocation_type;
	mem->cookie_count = 1;

	/* Translate the symbolic alignment into a byte boundary. */
	switch (alignment) {
	case QL_DMA_DATA_ALIGN:
		mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
		break;
	case QL_DMA_RING_ALIGN:
		mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
		break;
	default:
		/*
		 * NOTE(review): on an unknown alignment type this only
		 * logs and falls through, so the allocation proceeds with
		 * whatever value mem->alignment already held — confirm
		 * whether an error return was intended here.
		 */
		EL(ha, "failed, unknown alignment type %x\n", alignment);
		break;
	}

	if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
		/* ql_alloc_phys cleans up partially; release the rest. */
		ql_free_phys(ha, mem);
		EL(ha, "failed, alloc_phys=%xh\n", rval);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
15543 15543
15544 15544 /*
15545 15545 * ql_alloc_phys
15546 15546 * Function used to allocate memory and zero it.
15547 15547 * Memory is below 4 GB.
15548 15548 *
15549 15549 * Input:
15550 15550 * ha: adapter state pointer.
15551 15551 * mem: pointer to dma memory object.
15552 15552 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15553 15553 * mem->cookie_count number of segments allowed.
15554 15554 * mem->type memory allocation type.
15555 15555 * mem->size memory size.
15556 15556 * mem->alignment memory alignment.
15557 15557 *
15558 15558 * Returns:
15559 15559 * qn local function return status code.
15560 15560 *
15561 15561 * Context:
15562 15562 * Kernel context.
15563 15563 */
15564 15564 int
15565 15565 ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15566 15566 {
15567 15567 size_t rlen;
15568 15568 ddi_dma_attr_t dma_attr;
15569 15569 ddi_device_acc_attr_t acc_attr = ql_dev_acc_attr;
15570 15570
15571 15571 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15572 15572
15573 15573 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15574 15574 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15575 15575
15576 15576 dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
15577 15577 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15578 15578
15579 15579 /*
15580 15580 * Workaround for SUN XMITS buffer must end and start on 8 byte
15581 15581 * boundary. Else, hardware will overrun the buffer. Simple fix is
15582 15582 * to make sure buffer has enough room for overrun.
15583 15583 */
15584 15584 if (mem->size & 7) {
15585 15585 mem->size += 8 - (mem->size & 7);
15586 15586 }
15587 15587
15588 15588 mem->flags = DDI_DMA_CONSISTENT;
15589 15589
15590 15590 /*
15591 15591 * Allocate DMA memory for command.
15592 15592 */
15593 15593 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15594 15594 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15595 15595 DDI_SUCCESS) {
15596 15596 EL(ha, "failed, ddi_dma_alloc_handle\n");
15597 15597 mem->dma_handle = NULL;
15598 15598 return (QL_MEMORY_ALLOC_FAILED);
15599 15599 }
15600 15600
15601 15601 switch (mem->type) {
15602 15602 case KERNEL_MEM:
15603 15603 mem->bp = kmem_zalloc(mem->size, sleep);
15604 15604 break;
15605 15605 case BIG_ENDIAN_DMA:
15606 15606 case LITTLE_ENDIAN_DMA:
15607 15607 case NO_SWAP_DMA:
15608 15608 if (mem->type == BIG_ENDIAN_DMA) {
15609 15609 acc_attr.devacc_attr_endian_flags =
15610 15610 DDI_STRUCTURE_BE_ACC;
15611 15611 } else if (mem->type == NO_SWAP_DMA) {
15612 15612 acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
15613 15613 }
15614 15614 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
15615 15615 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15616 15616 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
15617 15617 &mem->acc_handle) == DDI_SUCCESS) {
15618 15618 bzero(mem->bp, mem->size);
15619 15619 /* ensure we got what we asked for (32bit) */
15620 15620 if (dma_attr.dma_attr_addr_hi == NULL) {
15621 15621 if (mem->cookie.dmac_notused != NULL) {
15622 15622 EL(ha, "failed, ddi_dma_mem_alloc "
15623 15623 "returned 64 bit DMA address\n");
15624 15624 ql_free_phys(ha, mem);
15625 15625 return (QL_MEMORY_ALLOC_FAILED);
15626 15626 }
15627 15627 }
15628 15628 } else {
15629 15629 mem->acc_handle = NULL;
15630 15630 mem->bp = NULL;
15631 15631 }
15632 15632 break;
15633 15633 default:
15634 15634 EL(ha, "failed, unknown type=%xh\n", mem->type);
15635 15635 mem->acc_handle = NULL;
15636 15636 mem->bp = NULL;
15637 15637 break;
15638 15638 }
15639 15639
15640 15640 if (mem->bp == NULL) {
15641 15641 EL(ha, "failed, ddi_dma_mem_alloc\n");
15642 15642 ddi_dma_free_handle(&mem->dma_handle);
15643 15643 mem->dma_handle = NULL;
15644 15644 return (QL_MEMORY_ALLOC_FAILED);
15645 15645 }
15646 15646
15647 15647 mem->flags |= DDI_DMA_RDWR;
15648 15648
15649 15649 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15650 15650 EL(ha, "failed, ddi_dma_addr_bind_handle\n");
15651 15651 ql_free_phys(ha, mem);
15652 15652 return (QL_MEMORY_ALLOC_FAILED);
15653 15653 }
15654 15654
15655 15655 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15656 15656
15657 15657 return (QL_SUCCESS);
15658 15658 }
15659 15659
15660 15660 /*
15661 15661 * ql_free_phys
15662 15662 * Function used to free physical memory.
15663 15663 *
15664 15664 * Input:
15665 15665 * ha: adapter state pointer.
15666 15666 * mem: pointer to dma memory object.
15667 15667 *
15668 15668 * Context:
15669 15669 * Kernel context.
15670 15670 */
15671 15671 void
15672 15672 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15673 15673 {
15674 15674 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15675 15675
15676 15676 if (mem != NULL && mem->dma_handle != NULL) {
15677 15677 ql_unbind_dma_buffer(ha, mem);
15678 15678 switch (mem->type) {
15679 15679 case KERNEL_MEM:
15680 15680 if (mem->bp != NULL) {
15681 15681 kmem_free(mem->bp, mem->size);
15682 15682 }
15683 15683 break;
15684 15684 case LITTLE_ENDIAN_DMA:
15685 15685 case BIG_ENDIAN_DMA:
15686 15686 case NO_SWAP_DMA:
15687 15687 if (mem->acc_handle != NULL) {
15688 15688 ddi_dma_mem_free(&mem->acc_handle);
15689 15689 mem->acc_handle = NULL;
15690 15690 }
15691 15691 break;
15692 15692 default:
15693 15693 break;
15694 15694 }
15695 15695 mem->bp = NULL;
15696 15696 ddi_dma_free_handle(&mem->dma_handle);
15697 15697 mem->dma_handle = NULL;
15698 15698 }
15699 15699
15700 15700 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15701 15701 }
15702 15702
15703 15703 /*
15704 15704 * ql_alloc_dma_resouce.
15705 15705 * Allocates DMA resource for buffer.
15706 15706 *
15707 15707 * Input:
15708 15708 * ha: adapter state pointer.
15709 15709 * mem: pointer to dma memory object.
15710 15710 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15711 15711 * mem->cookie_count number of segments allowed.
15712 15712 * mem->type memory allocation type.
15713 15713 * mem->size memory size.
15714 15714 * mem->bp pointer to memory or struct buf
15715 15715 *
15716 15716 * Returns:
15717 15717 * qn local function return status code.
15718 15718 *
15719 15719 * Context:
15720 15720 * Kernel context.
15721 15721 */
15722 15722 int
15723 15723 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15724 15724 {
15725 15725 ddi_dma_attr_t dma_attr;
15726 15726
15727 15727 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15728 15728
15729 15729 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15730 15730 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15731 15731 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15732 15732
15733 15733 /*
15734 15734 * Allocate DMA handle for command.
15735 15735 */
15736 15736 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15737 15737 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15738 15738 DDI_SUCCESS) {
15739 15739 EL(ha, "failed, ddi_dma_alloc_handle\n");
15740 15740 mem->dma_handle = NULL;
15741 15741 return (QL_MEMORY_ALLOC_FAILED);
15742 15742 }
15743 15743
15744 15744 mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15745 15745
15746 15746 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15747 15747 EL(ha, "failed, bind_dma_buffer\n");
15748 15748 ddi_dma_free_handle(&mem->dma_handle);
15749 15749 mem->dma_handle = NULL;
15750 15750 return (QL_MEMORY_ALLOC_FAILED);
15751 15751 }
15752 15752
15753 15753 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15754 15754
15755 15755 return (QL_SUCCESS);
15756 15756 }
15757 15757
15758 15758 /*
15759 15759 * ql_free_dma_resource
15760 15760 * Frees DMA resources.
15761 15761 *
15762 15762 * Input:
15763 15763 * ha: adapter state pointer.
15764 15764 * mem: pointer to dma memory object.
15765 15765 * mem->dma_handle DMA memory handle.
15766 15766 *
15767 15767 * Context:
15768 15768 * Kernel context.
15769 15769 */
15770 15770 void
15771 15771 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15772 15772 {
15773 15773 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15774 15774
15775 15775 ql_free_phys(ha, mem);
15776 15776
15777 15777 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15778 15778 }
15779 15779
15780 15780 /*
15781 15781 * ql_bind_dma_buffer
15782 15782 * Binds DMA buffer.
15783 15783 *
15784 15784 * Input:
15785 15785 * ha: adapter state pointer.
15786 15786 * mem: pointer to dma memory object.
15787 15787 * sleep: KM_SLEEP or KM_NOSLEEP.
15788 15788 * mem->dma_handle DMA memory handle.
15789 15789 * mem->cookie_count number of segments allowed.
15790 15790 * mem->type memory allocation type.
15791 15791 * mem->size memory size.
15792 15792 * mem->bp pointer to memory or struct buf
15793 15793 *
15794 15794 * Returns:
15795 15795 * mem->cookies pointer to list of cookies.
15796 15796 * mem->cookie_count number of cookies.
15797 15797 * status success = DDI_DMA_MAPPED
15798 15798 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15799 15799 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15800 15800 * DDI_DMA_TOOBIG
15801 15801 *
15802 15802 * Context:
15803 15803 * Kernel context.
15804 15804 */
15805 15805 static int
15806 15806 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15807 15807 {
15808 15808 int rval;
15809 15809 ddi_dma_cookie_t *cookiep;
15810 15810 uint32_t cnt = mem->cookie_count;
15811 15811
15812 15812 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15813 15813
15814 15814 if (mem->type == STRUCT_BUF_MEMORY) {
15815 15815 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15816 15816 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15817 15817 DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15818 15818 } else {
15819 15819 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15820 15820 mem->size, mem->flags, (sleep == KM_SLEEP) ?
15821 15821 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15822 15822 &mem->cookie_count);
15823 15823 }
15824 15824
15825 15825 if (rval == DDI_DMA_MAPPED) {
15826 15826 if (mem->cookie_count > cnt) {
15827 15827 (void) ddi_dma_unbind_handle(mem->dma_handle);
15828 15828 EL(ha, "failed, cookie_count %d > %d\n",
15829 15829 mem->cookie_count, cnt);
15830 15830 rval = DDI_DMA_TOOBIG;
15831 15831 } else {
15832 15832 if (mem->cookie_count > 1) {
15833 15833 if (mem->cookies = kmem_zalloc(
15834 15834 sizeof (ddi_dma_cookie_t) *
15835 15835 mem->cookie_count, sleep)) {
15836 15836 *mem->cookies = mem->cookie;
15837 15837 cookiep = mem->cookies;
15838 15838 for (cnt = 1; cnt < mem->cookie_count;
15839 15839 cnt++) {
15840 15840 ddi_dma_nextcookie(
15841 15841 mem->dma_handle,
15842 15842 ++cookiep);
15843 15843 }
15844 15844 } else {
15845 15845 (void) ddi_dma_unbind_handle(
15846 15846 mem->dma_handle);
15847 15847 EL(ha, "failed, kmem_zalloc\n");
15848 15848 rval = DDI_DMA_NORESOURCES;
15849 15849 }
15850 15850 } else {
15851 15851 /*
15852 15852 * It has been reported that dmac_size at times
15853 15853 * may be incorrect on sparc machines so for
15854 15854 * sparc machines that only have one segment
15855 15855 * use the buffer size instead.
15856 15856 */
15857 15857 mem->cookies = &mem->cookie;
15858 15858 mem->cookies->dmac_size = mem->size;
15859 15859 }
15860 15860 }
15861 15861 }
15862 15862
15863 15863 if (rval != DDI_DMA_MAPPED) {
15864 15864 EL(ha, "failed=%xh\n", rval);
15865 15865 } else {
15866 15866 /*EMPTY*/
15867 15867 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15868 15868 }
15869 15869
15870 15870 return (rval);
15871 15871 }
15872 15872
15873 15873 /*
15874 15874 * ql_unbind_dma_buffer
15875 15875 * Unbinds DMA buffer.
15876 15876 *
15877 15877 * Input:
15878 15878 * ha: adapter state pointer.
15879 15879 * mem: pointer to dma memory object.
15880 15880 * mem->dma_handle DMA memory handle.
15881 15881 * mem->cookies pointer to cookie list.
15882 15882 * mem->cookie_count number of cookies.
15883 15883 *
15884 15884 * Context:
15885 15885 * Kernel context.
15886 15886 */
15887 15887 /* ARGSUSED */
15888 15888 static void
15889 15889 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15890 15890 {
15891 15891 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15892 15892
15893 15893 (void) ddi_dma_unbind_handle(mem->dma_handle);
15894 15894 if (mem->cookie_count > 1) {
15895 15895 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15896 15896 mem->cookie_count);
↓ open down ↓ |
4014 lines elided |
↑ open up ↑ |
15897 15897 mem->cookies = NULL;
15898 15898 }
15899 15899 mem->cookie_count = 0;
15900 15900
15901 15901 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15902 15902 }
15903 15903
/*
 * ql_suspend_adapter
 *	Quiesces an adapter instance for suspend/power-down: claims
 *	mailbox ownership, drains outstanding commands, halts the ISP
 *	and disables its interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	clock_t timer = drv_sectohz(32);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* ~32 seconds from now */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {
			/* Timed out waiting for the mailbox to free up. */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);
			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	/* Wait for commands the firmware is still working on. */
	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15973 15973
15974 15974 /*
15975 15975 * ql_add_link_b
15976 15976 * Add link to the end of the chain.
15977 15977 *
15978 15978 * Input:
15979 15979 * head = Head of link list.
15980 15980 * link = link to be added.
15981 15981 * LOCK must be already obtained.
15982 15982 *
15983 15983 * Context:
15984 15984 * Interrupt or Kernel context, no mailbox commands allowed.
15985 15985 */
15986 15986 void
15987 15987 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15988 15988 {
15989 15989 /* at the end there isn't a next */
15990 15990 link->next = NULL;
15991 15991
15992 15992 if ((link->prev = head->last) == NULL) {
15993 15993 head->first = link;
15994 15994 } else {
15995 15995 head->last->next = link;
15996 15996 }
15997 15997
15998 15998 head->last = link;
15999 15999 link->head = head; /* the queue we're on */
16000 16000 }
16001 16001
16002 16002 /*
16003 16003 * ql_add_link_t
16004 16004 * Add link to the beginning of the chain.
16005 16005 *
16006 16006 * Input:
16007 16007 * head = Head of link list.
16008 16008 * link = link to be added.
16009 16009 * LOCK must be already obtained.
16010 16010 *
16011 16011 * Context:
16012 16012 * Interrupt or Kernel context, no mailbox commands allowed.
16013 16013 */
16014 16014 void
16015 16015 ql_add_link_t(ql_head_t *head, ql_link_t *link)
16016 16016 {
16017 16017 link->prev = NULL;
16018 16018
16019 16019 if ((link->next = head->first) == NULL) {
16020 16020 head->last = link;
16021 16021 } else {
16022 16022 head->first->prev = link;
16023 16023 }
16024 16024
16025 16025 head->first = link;
16026 16026 link->head = head; /* the queue we're on */
16027 16027 }
16028 16028
16029 16029 /*
16030 16030 * ql_remove_link
16031 16031 * Remove a link from the chain.
16032 16032 *
16033 16033 * Input:
16034 16034 * head = Head of link list.
16035 16035 * link = link to be removed.
16036 16036 * LOCK must be already obtained.
16037 16037 *
16038 16038 * Context:
16039 16039 * Interrupt or Kernel context, no mailbox commands allowed.
16040 16040 */
16041 16041 void
16042 16042 ql_remove_link(ql_head_t *head, ql_link_t *link)
16043 16043 {
16044 16044 if (link->prev != NULL) {
16045 16045 if ((link->prev->next = link->next) == NULL) {
16046 16046 head->last = link->prev;
16047 16047 } else {
16048 16048 link->next->prev = link->prev;
16049 16049 }
16050 16050 } else if ((head->first = link->next) == NULL) {
16051 16051 head->last = NULL;
16052 16052 } else {
16053 16053 head->first->prev = NULL;
16054 16054 }
16055 16055
16056 16056 /* not on a queue any more */
16057 16057 link->prev = link->next = NULL;
16058 16058 link->head = NULL;
16059 16059 }
16060 16060
/*
 * ql_chg_endian
 *	Reverses a byte array in place (endianness conversion).
 *
 * Input:
 *	buf = array pointer.
 *	size = size of array in bytes.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	uint8_t		tmp;
	size_t		lo, hi;

	/* Swap mirrored pairs working inward from both ends. */
	for (lo = 0, hi = size - 1; lo < size / 2; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
16087 16087
/*
 * ql_bstr_to_dec
 *	Convert decimal byte string to number.
 *
 * Input:
 *	s:	byte string pointer.
 *	ans:	integer pointer for number.
 *	size:	number of ascii bytes, or 0 to count leading digits.
 *
 * Returns:
 *	success = number of ascii bytes processed.
 *
 * Context:
 *	Kernel/Interrupt context.
 */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	cnt = 0;
	int	digit, weight, pos;
	char	*p;

	/* size == 0 means "measure the run of leading digits". */
	if (size == 0) {
		for (p = s; *p >= '0' && *p <= '9'; p++) {
			size++;
		}
	}

	*ans = 0;
	while (*s != '\0' && size != 0) {
		if (*s < '0' || *s > '9') {
			/* Stop at the first non-digit. */
			break;
		}
		digit = *s++ - '0';

		/*
		 * Positional weight is 10^(size-1), so a string shorter
		 * than the requested size keeps its leading-digit value.
		 */
		for (weight = 1, pos = 1; pos < (int)size; pos++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		cnt++;
	}

	return (cnt);
}
16132 16132
16133 16133 /*
16134 16134 * ql_delay
16135 16135 * Calls delay routine if threads are not suspended, otherwise, busy waits
16136 16136 * Minimum = 1 tick = 10ms
16137 16137 *
16138 16138 * Input:
16139 16139 * dly = delay time in microseconds.
16140 16140 *
16141 16141 * Context:
16142 16142 * Kernel or Interrupt context, no mailbox commands allowed.
16143 16143 */
16144 16144 void
16145 16145 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16146 16146 {
16147 16147 if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16148 16148 drv_usecwait(usecs);
16149 16149 } else {
16150 16150 delay(drv_usectohz(usecs));
16151 16151 }
16152 16152 }
16153 16153
16154 16154 /*
16155 16155 * ql_stall_drv
16156 16156 * Stalls one or all driver instances, waits for 30 seconds.
16157 16157 *
16158 16158 * Input:
16159 16159 * ha: adapter state pointer or NULL for all.
16160 16160 * options: BIT_0 --> leave driver stalled on exit if
16161 16161 * failed.
16162 16162 *
16163 16163 * Returns:
16164 16164 * ql local function return status code.
16165 16165 *
16166 16166 * Context:
16167 16167 * Kernel context.
16168 16168 */
16169 16169 int
16170 16170 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16171 16171 {
16172 16172 ql_link_t *link;
16173 16173 ql_adapter_state_t *ha2;
16174 16174 uint32_t timer;
16175 16175
16176 16176 QL_PRINT_3(CE_CONT, "started\n");
16177 16177
16178 16178 /* Wait for 30 seconds for daemons unstall. */
16179 16179 timer = 3000;
16180 16180 link = ha == NULL ? ql_hba.first : &ha->hba;
16181 16181 while (link != NULL && timer) {
16182 16182 ha2 = link->base_address;
16183 16183
16184 16184 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16185 16185
16186 16186 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16187 16187 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16188 16188 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16189 16189 ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16190 16190 link = ha == NULL ? link->next : NULL;
16191 16191 continue;
16192 16192 }
16193 16193
16194 16194 ql_delay(ha2, 10000);
16195 16195 timer--;
16196 16196 link = ha == NULL ? ql_hba.first : &ha->hba;
16197 16197 }
16198 16198
16199 16199 if (ha2 != NULL && timer == 0) {
16200 16200 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16201 16201 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16202 16202 "unstalled"));
16203 16203 if (options & BIT_0) {
16204 16204 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16205 16205 }
16206 16206 return (QL_FUNCTION_TIMEOUT);
16207 16207 }
16208 16208
16209 16209 QL_PRINT_3(CE_CONT, "done\n");
16210 16210
16211 16211 return (QL_SUCCESS);
16212 16212 }
16213 16213
16214 16214 /*
16215 16215 * ql_restart_driver
16216 16216 * Restarts one or all driver instances.
16217 16217 *
16218 16218 * Input:
16219 16219 * ha: adapter state pointer or NULL for all.
16220 16220 *
16221 16221 * Context:
16222 16222 * Kernel context.
16223 16223 */
16224 16224 void
16225 16225 ql_restart_driver(ql_adapter_state_t *ha)
16226 16226 {
16227 16227 ql_link_t *link;
16228 16228 ql_adapter_state_t *ha2;
16229 16229 uint32_t timer;
16230 16230
16231 16231 QL_PRINT_3(CE_CONT, "started\n");
16232 16232
16233 16233 /* Tell all daemons to unstall. */
16234 16234 link = ha == NULL ? ql_hba.first : &ha->hba;
16235 16235 while (link != NULL) {
16236 16236 ha2 = link->base_address;
16237 16237
16238 16238 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16239 16239
16240 16240 link = ha == NULL ? link->next : NULL;
16241 16241 }
16242 16242
16243 16243 /* Wait for 30 seconds for all daemons unstall. */
16244 16244 timer = 3000;
16245 16245 link = ha == NULL ? ql_hba.first : &ha->hba;
16246 16246 while (link != NULL && timer) {
16247 16247 ha2 = link->base_address;
16248 16248
16249 16249 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16250 16250 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16251 16251 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
16252 16252 QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
16253 16253 ha2->instance, ha2->vp_index);
16254 16254 ql_restart_queues(ha2);
16255 16255 link = ha == NULL ? link->next : NULL;
16256 16256 continue;
16257 16257 }
16258 16258
16259 16259 QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
16260 16260 ha2->instance, ha2->vp_index, ha2->task_daemon_flags);
16261 16261
16262 16262 ql_delay(ha2, 10000);
16263 16263 timer--;
16264 16264 link = ha == NULL ? ql_hba.first : &ha->hba;
16265 16265 }
16266 16266
16267 16267 QL_PRINT_3(CE_CONT, "done\n");
16268 16268 }
16269 16269
16270 16270 /*
16271 16271 * ql_setup_interrupts
16272 16272 * Sets up interrupts based on the HBA's and platform's
16273 16273 * capabilities (e.g., legacy / MSI / FIXED).
16274 16274 *
16275 16275 * Input:
16276 16276 * ha = adapter state pointer.
16277 16277 *
16278 16278 * Returns:
16279 16279 * DDI_SUCCESS or DDI_FAILURE.
16280 16280 *
16281 16281 * Context:
16282 16282 * Kernel context.
16283 16283 */
16284 16284 static int
16285 16285 ql_setup_interrupts(ql_adapter_state_t *ha)
16286 16286 {
16287 16287 int32_t rval = DDI_FAILURE;
16288 16288 int32_t i;
16289 16289 int32_t itypes = 0;
16290 16290
16291 16291 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16292 16292
16293 16293 /*
16294 16294 * The Solaris Advanced Interrupt Functions (aif) are only
16295 16295 * supported on s10U1 or greater.
16296 16296 */
16297 16297 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16298 16298 EL(ha, "interrupt framework is not supported or is "
16299 16299 "disabled, using legacy\n");
16300 16300 return (ql_legacy_intr(ha));
16301 16301 } else if (ql_os_release_level == 10) {
16302 16302 /*
16303 16303 * See if the advanced interrupt functions (aif) are
16304 16304 * in the kernel
16305 16305 */
16306 16306 void *fptr = (void *)&ddi_intr_get_supported_types;
16307 16307
16308 16308 if (fptr == NULL) {
16309 16309 EL(ha, "aif is not supported, using legacy "
16310 16310 "interrupts (rev)\n");
16311 16311 return (ql_legacy_intr(ha));
16312 16312 }
16313 16313 }
16314 16314
16315 16315 /* See what types of interrupts this HBA and platform support */
16316 16316 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16317 16317 DDI_SUCCESS) {
16318 16318 EL(ha, "get supported types failed, rval=%xh, "
16319 16319 "assuming FIXED\n", i);
16320 16320 itypes = DDI_INTR_TYPE_FIXED;
16321 16321 }
16322 16322
16323 16323 EL(ha, "supported types are: %xh\n", itypes);
16324 16324
16325 16325 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16326 16326 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16327 16327 EL(ha, "successful MSI-X setup\n");
16328 16328 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16329 16329 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16330 16330 EL(ha, "successful MSI setup\n");
16331 16331 } else {
16332 16332 rval = ql_setup_fixed(ha);
16333 16333 }
16334 16334
16335 16335 if (rval != DDI_SUCCESS) {
16336 16336 EL(ha, "failed, aif, rval=%xh\n", rval);
16337 16337 } else {
16338 16338 /*EMPTY*/
16339 16339 QL_PRINT_3(CE_CONT, "(%d): done\n");
16340 16340 }
16341 16341
16342 16342 return (rval);
16343 16343 }
16344 16344
/*
 * ql_setup_msi
 *	Set up aif MSI interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Honor the user tunable that force-disables MSI. */
	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1. */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		/* Undo iflags/htable state set above. */
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes (requires intr_pri obtained above). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Block-enable capable: enable all vectors at once. */
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16464 16464
/*
 * ql_setup_msix
 *	Set up aif MSI-X interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Honor the user tunable that force-disables MSI-X. */
	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
	    (ha->device_id == 0x8021))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/*
	 * Get the number of 24xx/25xx MSI-X h/w vectors from PCI config
	 * space (table-size field; offset differs per chip family).
	 */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		/* Undo iflags/htable state set above. */
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */
#ifdef __sparc
	for (i = actual; i < hwvect; i++) {
		if ((ret = ddi_intr_dup_handler(ha->htable[0], (int)i,
		    &ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr_dup#=%xh, act=%xh, ret=%xh\n",
			    i, actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}
#endif

	/* Setup mutexes (requires intr_pri obtained above). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Block-enable capable: enable all vectors at once. */
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		/* Otherwise enable each vector individually. */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16644 16644
/*
 * ql_setup_fixed
 *	Sets up aif FIXED interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts (STRICT: all requested or fail). */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		/* Undo iflags/htable state set above. */
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes (requires intr_pri obtained above). */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16734 16734
16735 16735 /*
16736 16736 * ql_disable_intr
16737 16737 * Disables interrupts
16738 16738 *
16739 16739 * Input:
16740 16740 * ha = adapter state pointer.
16741 16741 *
16742 16742 * Returns:
16743 16743 *
16744 16744 * Context:
16745 16745 * Kernel context.
16746 16746 */
16747 16747 static void
16748 16748 ql_disable_intr(ql_adapter_state_t *ha)
16749 16749 {
16750 16750 uint32_t i, rval;
16751 16751
16752 16752 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16753 16753
16754 16754 if (!(ha->iflags & IFLG_INTR_AIF)) {
16755 16755
16756 16756 /* Disable legacy interrupts */
16757 16757 (void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);
16758 16758
16759 16759 } else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
16760 16760 (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {
16761 16761
16762 16762 /* Remove AIF block interrupts (MSI) */
16763 16763 if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
16764 16764 != DDI_SUCCESS) {
16765 16765 EL(ha, "failed intr block disable, rval=%x\n", rval);
16766 16766 }
16767 16767
16768 16768 } else {
16769 16769
16770 16770 /* Remove AIF non-block interrupts (fixed). */
16771 16771 for (i = 0; i < ha->intr_cnt; i++) {
16772 16772 if ((rval = ddi_intr_disable(ha->htable[i])) !=
16773 16773 DDI_SUCCESS) {
16774 16774 EL(ha, "failed intr disable, intr#=%xh, "
16775 16775 "rval=%xh\n", i, rval);
16776 16776 }
16777 16777 }
16778 16778 }
16779 16779
16780 16780 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16781 16781 }
16782 16782
/*
 * ql_release_intr
 *	Releases aif legacy interrupt resources
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *
 * Context:
 *	Kernel context.
 */
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to release for non-aif (legacy) interrupt setups. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* Walk every handle slot that was allocated. */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/*
			 * Handlers were only added for the first intr_cnt
			 * vectors; slots beyond that were duplicated.
			 */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all interrupt bookkeeping. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16836 16836
16837 16837 /*
16838 16838 * ql_legacy_intr
16839 16839 * Sets up legacy interrupts.
16840 16840 *
16841 16841 * NB: Only to be used if AIF (Advanced Interupt Framework)
16842 16842 * if NOT in the kernel.
16843 16843 *
16844 16844 * Input:
16845 16845 * ha = adapter state pointer.
16846 16846 *
16847 16847 * Returns:
16848 16848 * DDI_SUCCESS or DDI_FAILURE.
16849 16849 *
16850 16850 * Context:
16851 16851 * Kernel context.
16852 16852 */
16853 16853 static int
16854 16854 ql_legacy_intr(ql_adapter_state_t *ha)
16855 16855 {
16856 16856 int rval = DDI_SUCCESS;
16857 16857
16858 16858 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16859 16859
16860 16860 /* Setup mutexes */
16861 16861 if (ql_init_mutex(ha) != DDI_SUCCESS) {
16862 16862 EL(ha, "failed, mutex init\n");
16863 16863 return (DDI_FAILURE);
16864 16864 }
16865 16865
16866 16866 /* Setup standard/legacy interrupt handler */
16867 16867 if (ddi_add_intr(ha->dip, (uint_t)0, &ha->iblock_cookie,
16868 16868 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16869 16869 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16870 16870 QL_NAME, ha->instance);
16871 16871 ql_destroy_mutex(ha);
16872 16872 rval = DDI_FAILURE;
16873 16873 }
16874 16874
16875 16875 if (rval == DDI_SUCCESS) {
16876 16876 ha->iflags |= IFLG_INTR_LEGACY;
16877 16877 EL(ha, "using legacy interrupts\n");
16878 16878 }
16879 16879
16880 16880 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16881 16881
16882 16882 return (rval);
16883 16883 }
16884 16884
16885 16885 /*
16886 16886 * ql_init_mutex
16887 16887 	 *	Initializes mutexes
16888 16888 *
16889 16889 * Input:
16890 16890 * ha = adapter state pointer.
16891 16891 *
16892 16892 * Returns:
16893 16893 * DDI_SUCCESS or DDI_FAILURE.
16894 16894 *
16895 16895 * Context:
16896 16896 * Kernel context.
16897 16897 */
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ha->iflags & IFLG_INTR_AIF) {
		/*
		 * Advanced interrupt framework in use: the interrupt
		 * priority recorded at interrupt-allocation time serves
		 * as the cookie for mutex initialization.
		 */
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	/* NOTE: ql_destroy_mutex() tears all of these down in reverse order. */

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16962 16962
16963 16963 /*
16964 16964 * ql_destroy_mutex
16965 16965 	 *	Destroys mutexes
16966 16966 *
16967 16967 * Input:
16968 16968 * ha = adapter state pointer.
16969 16969 *
16970 16970 * Returns:
16971 16971 *
16972 16972 * Context:
16973 16973 * Kernel context.
16974 16974 */
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Destroy the locks and condition variables in the exact reverse
	 * of the order ql_init_mutex() created them.
	 */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16998 16998
16999 16999 /*
17000 17000 * ql_fwmodule_resolve
17001 17001 * Loads and resolves external firmware module and symbols
17002 17002 *
17003 17003 * Input:
17004 17004 * ha: adapter state pointer.
17005 17005 *
17006 17006 * Returns:
17007 17007 * ql local function return status code:
17008 17008 	 *	QL_SUCCESS - external f/w module and symbols resolved
17009 17009 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
17010 17010 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
17011 17011 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
17012 17012 * Context:
17013 17013 * Kernel context.
17014 17014 *
17015 17015 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
17016 17016 	 * could switch to a tighter scope around actual download (and add an extra
17017 17017 * ddi_modopen for module opens that occur before root is mounted).
17018 17018 *
17019 17019 */
17020 17020 uint32_t
17021 17021 ql_fwmodule_resolve(ql_adapter_state_t *ha)
17022 17022 {
17023 17023 int8_t module[128];
17024 17024 int8_t fw_version[128];
17025 17025 uint32_t rval = QL_SUCCESS;
17026 17026 caddr_t code, code02;
17027 17027 uint8_t *p_ucfw;
17028 17028 uint16_t *p_usaddr, *p_uslen;
17029 17029 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
17030 17030 uint32_t *p_uiaddr02, *p_uilen02;
17031 17031 struct fw_table *fwt;
17032 17032 extern struct fw_table fw_table[];
17033 17033
17034 17034 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17035 17035
17036 17036 if (ha->fw_module != NULL) {
17037 17037 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
17038 17038 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
17039 17039 ha->fw_subminor_version);
17040 17040 return (rval);
17041 17041 }
17042 17042
17043 17043 /* make sure the fw_class is in the fw_table of supported classes */
17044 17044 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
17045 17045 if (fwt->fw_class == ha->fw_class)
17046 17046 break; /* match */
17047 17047 }
17048 17048 if (fwt->fw_version == NULL) {
17049 17049 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
17050 17050 "in driver's fw_table", QL_NAME, ha->instance,
17051 17051 ha->fw_class);
17052 17052 return (QL_FW_NOT_SUPPORTED);
17053 17053 }
17054 17054
17055 17055 /*
17056 17056 * open the module related to the fw_class
17057 17057 */
17058 17058 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17059 17059 ha->fw_class);
17060 17060
17061 17061 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17062 17062 if (ha->fw_module == NULL) {
17063 17063 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17064 17064 QL_NAME, ha->instance, module);
17065 17065 return (QL_FWMODLOAD_FAILED);
17066 17066 }
17067 17067
17068 17068 /*
17069 17069 * resolve the fw module symbols, data types depend on fw_class
17070 17070 */
17071 17071
17072 17072 switch (ha->fw_class) {
17073 17073 case 0x2200:
17074 17074 case 0x2300:
17075 17075 case 0x6322:
17076 17076
17077 17077 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17078 17078 NULL)) == NULL) {
17079 17079 rval = QL_FWSYM_NOT_FOUND;
17080 17080 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17081 17081 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17082 17082 "risc_code_addr01", NULL)) == NULL) {
17083 17083 rval = QL_FWSYM_NOT_FOUND;
17084 17084 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17085 17085 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17086 17086 "risc_code_length01", NULL)) == NULL) {
17087 17087 rval = QL_FWSYM_NOT_FOUND;
17088 17088 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17089 17089 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17090 17090 "firmware_version", NULL)) == NULL) {
17091 17091 rval = QL_FWSYM_NOT_FOUND;
17092 17092 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17093 17093 }
17094 17094
17095 17095 if (rval == QL_SUCCESS) {
17096 17096 ha->risc_fw[0].code = code;
17097 17097 ha->risc_fw[0].addr = *p_usaddr;
17098 17098 ha->risc_fw[0].length = *p_uslen;
17099 17099
17100 17100 (void) snprintf(fw_version, sizeof (fw_version),
17101 17101 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17102 17102 }
17103 17103 break;
17104 17104
17105 17105 case 0x2400:
17106 17106 case 0x2500:
17107 17107 case 0x8100:
17108 17108
17109 17109 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17110 17110 NULL)) == NULL) {
17111 17111 rval = QL_FWSYM_NOT_FOUND;
17112 17112 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17113 17113 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17114 17114 "risc_code_addr01", NULL)) == NULL) {
17115 17115 rval = QL_FWSYM_NOT_FOUND;
17116 17116 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17117 17117 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17118 17118 "risc_code_length01", NULL)) == NULL) {
17119 17119 rval = QL_FWSYM_NOT_FOUND;
17120 17120 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17121 17121 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17122 17122 "firmware_version", NULL)) == NULL) {
17123 17123 rval = QL_FWSYM_NOT_FOUND;
17124 17124 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17125 17125 }
17126 17126
17127 17127 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17128 17128 NULL)) == NULL) {
17129 17129 rval = QL_FWSYM_NOT_FOUND;
17130 17130 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17131 17131 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17132 17132 "risc_code_addr02", NULL)) == NULL) {
17133 17133 rval = QL_FWSYM_NOT_FOUND;
17134 17134 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17135 17135 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17136 17136 "risc_code_length02", NULL)) == NULL) {
17137 17137 rval = QL_FWSYM_NOT_FOUND;
17138 17138 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17139 17139 }
17140 17140
17141 17141 if (rval == QL_SUCCESS) {
17142 17142 ha->risc_fw[0].code = code;
17143 17143 ha->risc_fw[0].addr = *p_uiaddr;
17144 17144 ha->risc_fw[0].length = *p_uilen;
17145 17145 ha->risc_fw[1].code = code02;
17146 17146 ha->risc_fw[1].addr = *p_uiaddr02;
17147 17147 ha->risc_fw[1].length = *p_uilen02;
17148 17148
17149 17149 (void) snprintf(fw_version, sizeof (fw_version),
17150 17150 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17151 17151 }
17152 17152 break;
17153 17153
17154 17154 default:
17155 17155 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17156 17156 rval = QL_FW_NOT_SUPPORTED;
17157 17157 }
17158 17158
17159 17159 if (rval != QL_SUCCESS) {
17160 17160 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17161 17161 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17162 17162 if (ha->fw_module != NULL) {
17163 17163 (void) ddi_modclose(ha->fw_module);
17164 17164 ha->fw_module = NULL;
17165 17165 }
17166 17166 } else {
17167 17167 /*
17168 17168 * check for firmware version mismatch between module and
17169 17169 * compiled in fw_table version.
17170 17170 */
17171 17171
17172 17172 if (strcmp(fwt->fw_version, fw_version) != 0) {
17173 17173
17174 17174 /*
17175 17175 * If f/w / driver version mismatches then
17176 17176 * return a successful status -- however warn
17177 17177 * the user that this is NOT recommended.
17178 17178 */
17179 17179
17180 17180 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17181 17181 "mismatch for %x: driver-%s module-%s", QL_NAME,
17182 17182 ha->instance, ha->fw_class, fwt->fw_version,
17183 17183 fw_version);
17184 17184
17185 17185 ha->cfg_flags |= CFG_FW_MISMATCH;
17186 17186 } else {
17187 17187 ha->cfg_flags &= ~CFG_FW_MISMATCH;
17188 17188 }
17189 17189 }
17190 17190
17191 17191 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17192 17192
17193 17193 return (rval);
17194 17194 }
17195 17195
17196 17196 /*
17197 17197 * ql_port_state
17198 17198 * Set the state on all adapter ports.
17199 17199 *
17200 17200 * Input:
17201 17201 * ha: parent adapter state pointer.
17202 17202 * state: port state.
17203 17203 * flags: task daemon flags to set.
17204 17204 *
17205 17205 * Context:
17206 17206 * Interrupt or Kernel context, no mailbox commands allowed.
17207 17207 */
17208 17208 void
17209 17209 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17210 17210 {
17211 17211 ql_adapter_state_t *vha;
17212 17212
17213 17213 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17214 17214
17215 17215 TASK_DAEMON_LOCK(ha);
17216 17216 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17217 17217 if (FC_PORT_STATE_MASK(vha->state) != state) {
17218 17218 vha->state = state != FC_STATE_OFFLINE ?
17219 17219 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17220 17220 vha->task_daemon_flags |= flags;
17221 17221 }
17222 17222 }
17223 17223 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17224 17224 TASK_DAEMON_UNLOCK(ha);
17225 17225
17226 17226 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17227 17227 }
17228 17228
17229 17229 /*
17230 17230 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17231 17231 *
17232 17232 * Input: Pointer to the adapter state structure.
17233 17233 * Returns: Success or Failure.
17234 17234 * Context: Kernel context.
17235 17235 */
17236 17236 int
17237 17237 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17238 17238 {
17239 17239 int rval = DDI_SUCCESS;
17240 17240
17241 17241 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17242 17242
17243 17243 ha->el_trace_desc =
17244 17244 (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17245 17245
17246 17246 if (ha->el_trace_desc == NULL) {
17247 17247 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17248 17248 QL_NAME, ha->instance);
17249 17249 rval = DDI_FAILURE;
17250 17250 } else {
17251 17251 ha->el_trace_desc->next = 0;
17252 17252 ha->el_trace_desc->trace_buffer =
17253 17253 (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17254 17254
17255 17255 if (ha->el_trace_desc->trace_buffer == NULL) {
17256 17256 cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17257 17257 QL_NAME, ha->instance);
17258 17258 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17259 17259 rval = DDI_FAILURE;
17260 17260 } else {
17261 17261 ha->el_trace_desc->trace_buffer_size =
17262 17262 EL_TRACE_BUF_SIZE;
17263 17263 mutex_init(&ha->el_trace_desc->mutex, NULL,
17264 17264 MUTEX_DRIVER, NULL);
17265 17265 }
17266 17266 }
17267 17267
17268 17268 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17269 17269
17270 17270 return (rval);
17271 17271 }
17272 17272
17273 17273 /*
17274 17274 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17275 17275 *
17276 17276 * Input: Pointer to the adapter state structure.
17277 17277 * Returns: Success or Failure.
17278 17278 * Context: Kernel context.
17279 17279 */
17280 17280 int
17281 17281 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17282 17282 {
17283 17283 int rval = DDI_SUCCESS;
17284 17284
17285 17285 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17286 17286
17287 17287 if (ha->el_trace_desc == NULL) {
17288 17288 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17289 17289 QL_NAME, ha->instance);
17290 17290 rval = DDI_FAILURE;
17291 17291 } else {
17292 17292 if (ha->el_trace_desc->trace_buffer != NULL) {
17293 17293 kmem_free(ha->el_trace_desc->trace_buffer,
17294 17294 ha->el_trace_desc->trace_buffer_size);
17295 17295 }
17296 17296 mutex_destroy(&ha->el_trace_desc->mutex);
17297 17297 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17298 17298 }
17299 17299
17300 17300 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17301 17301
17302 17302 return (rval);
17303 17303 }
17304 17304
17305 17305 /*
17306 17306 * els_cmd_text - Return a pointer to a string describing the command
17307 17307 *
17308 17308 * Input: els_cmd = the els command opcode.
17309 17309 * Returns: pointer to a string.
17310 17310 * Context: Kernel context.
17311 17311 */
17312 17312 char *
17313 17313 els_cmd_text(int els_cmd)
17314 17314 {
17315 17315 cmd_table_t *entry = &els_cmd_tbl[0];
17316 17316
17317 17317 return (cmd_text(entry, els_cmd));
17318 17318 }
17319 17319
17320 17320 /*
17321 17321 * mbx_cmd_text - Return a pointer to a string describing the command
17322 17322 *
17323 17323 * Input: mbx_cmd = the mailbox command opcode.
17324 17324 * Returns: pointer to a string.
17325 17325 * Context: Kernel context.
17326 17326 */
17327 17327 char *
17328 17328 mbx_cmd_text(int mbx_cmd)
17329 17329 {
17330 17330 cmd_table_t *entry = &mbox_cmd_tbl[0];
17331 17331
17332 17332 return (cmd_text(entry, mbx_cmd));
17333 17333 }
17334 17334
17335 17335 /*
17336 17336 * cmd_text Return a pointer to a string describing the command
17337 17337 *
17338 17338 * Input: entry = the command table
17339 17339 * cmd = the command.
17340 17340 * Returns: pointer to a string.
17341 17341 * Context: Kernel context.
17342 17342 */
17343 17343 char *
17344 17344 cmd_text(cmd_table_t *entry, int cmd)
17345 17345 {
17346 17346 for (; entry->cmd != 0; entry++) {
17347 17347 if (entry->cmd == cmd) {
17348 17348 break;
17349 17349 }
17350 17350 }
17351 17351 return (entry->string);
17352 17352 }
17353 17353
17354 17354 /*
17355 17355 * ql_els_24xx_mbox_cmd_iocb - els request indication.
17356 17356 *
17357 17357 * Input: ha = adapter state pointer.
17358 17358 * srb = scsi request block pointer.
17359 17359 * arg = els passthru entry iocb pointer.
17360 17360 * Returns:
17361 17361 * Context: Kernel context.
17362 17362 */
17363 17363 void
17364 17364 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17365 17365 {
17366 17366 els_descriptor_t els_desc;
17367 17367
17368 17368 /* Extract the ELS information */
17369 17369 ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17370 17370
17371 17371 /* Construct the passthru entry */
17372 17372 ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17373 17373
17374 17374 /* Ensure correct endianness */
17375 17375 ql_isp_els_handle_cmd_endian(ha, srb);
17376 17376 }
17377 17377
17378 17378 /*
17379 17379 * ql_fca_isp_els_request - Extract into an els descriptor the info required
17380 17380 * to build an els_passthru iocb from an fc packet.
17381 17381 *
17382 17382 * Input: ha = adapter state pointer.
17383 17383 * pkt = fc packet pointer
17384 17384 * els_desc = els descriptor pointer
17385 17385 * Returns:
17386 17386 * Context: Kernel context.
17387 17387 */
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Fetch the ELS command opcode from the packet's command payload. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 *
	 * NOTE(review): only the first DMA cookie of the command and
	 * response buffers is used -- assumes each maps a single cookie.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17428 17428
17429 17429 /*
17430 17430 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17431 17431 * using the els descriptor.
17432 17432 *
17433 17433 * Input: ha = adapter state pointer.
17434 17434 * els_desc = els descriptor pointer.
17435 17435 * els_entry = els passthru entry iocb pointer.
17436 17436 * Returns:
17437 17437 * Context: Kernel context.
17438 17438 */
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through the DDI access
	 * handle so the IOCB ends up in device byte order.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	/* NOTE(review): BIT_4 sof_type value carried over as-is; meaning
	 * is defined by the ISP24xx IOCB format -- confirm against spec. */
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    (uint32_t)0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	/* Destination and source port IDs, one byte at a time. */
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	/*
	 * The receive DSD is written through the same running pointer, so
	 * this code relies on the receive segment fields immediately
	 * following the transmit segment fields in the IOCB layout --
	 * do not reorder these stores.
	 */
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17486 17486
17487 17487 /*
17488 17488 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17489 17489 * in host memory.
17490 17490 *
17491 17491 * Input: ha = adapter state pointer.
17492 17492 * srb = scsi request block
17493 17493 * Returns:
17494 17494 * Context: Kernel context.
17495 17495 */
17496 17496 void
17497 17497 ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
17498 17498 {
17499 17499 ls_code_t els;
17500 17500 fc_packet_t *pkt;
17501 17501 uint8_t *ptr;
17502 17502
17503 17503 pkt = srb->pkt;
17504 17504
17505 17505 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
17506 17506 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
17507 17507
17508 17508 ptr = (uint8_t *)pkt->pkt_cmd;
17509 17509
17510 17510 ql_isp_els_handle_endian(ha, ptr, els.ls_code);
17511 17511 }
17512 17512
17513 17513 /*
17514 17514 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17515 17515 * in host memory.
17516 17516 * Input: ha = adapter state pointer.
17517 17517 * srb = scsi request block
17518 17518 * Returns:
17519 17519 * Context: Kernel context.
17520 17520 */
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/* The opcode is taken from the original request (pkt_cmd), while
	 * the payload to be swapped is the response (pkt_resp). */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/*
	 * NOTE(review): the local opcode copy is byte-swapped here before
	 * ls_code is used, whereas ql_isp_els_handle_cmd_endian() uses it
	 * unswapped -- confirm which byte order pkt_cmd holds by the time
	 * the response path runs.
	 */
	BIG_ENDIAN_32(&els);
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17537 17537
17538 17538 /*
17539 17539 * ql_isp_els_handle_endian - els requests/responses must be in big endian
17540 17540 * in host memory.
17541 17541 * Input: ha = adapter state pointer.
17542 17542 * ptr = els request/response buffer pointer.
17543 17543 * ls_code = els command code.
17544 17544 * Returns:
17545 17545 * Context: Kernel context.
17546 17546 */
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the payload field by field, byte-swapping each multi-byte
	 * field in place.  ptr is advanced past fields that need no swap
	 * (WWNs and class service parameter blocks); the increments must
	 * therefore stay in this exact order.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS opcodes are passed through unswapped. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17605 17605
17606 17606 /*
17607 17607 * ql_n_port_plogi
17608 17608 * In N port 2 N port topology where an N Port has logged in with the
17609 17609 * firmware because it has the N_Port login initiative, we send up
17610 17610 * a plogi by proxy which stimulates the login procedure to continue.
17611 17611 *
17612 17612 * Input:
17613 17613 * ha = adapter state pointer.
17614 17614 * Returns:
17615 17615 *
17616 17616 * Context:
17617 17617 * Kernel context.
17618 17618 */
17619 17619 static int
17620 17620 ql_n_port_plogi(ql_adapter_state_t *ha)
17621 17621 {
17622 17622 int rval;
17623 17623 ql_tgt_t *tq;
17624 17624 ql_head_t done_q = { NULL, NULL };
17625 17625
17626 17626 rval = QL_SUCCESS;
17627 17627
17628 17628 if (ha->topology & QL_N_PORT) {
17629 17629 /* if we're doing this the n_port_handle must be good */
17630 17630 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17631 17631 tq = ql_loop_id_to_queue(ha,
17632 17632 ha->n_port->n_port_handle);
17633 17633 if (tq != NULL) {
17634 17634 (void) ql_send_plogi(ha, tq, &done_q);
17635 17635 } else {
17636 17636 EL(ha, "n_port_handle = %x, tq = %x\n",
17637 17637 ha->n_port->n_port_handle, tq);
17638 17638 }
17639 17639 } else {
17640 17640 EL(ha, "n_port_handle = %x, tq = %x\n",
17641 17641 ha->n_port->n_port_handle, tq);
17642 17642 }
17643 17643 if (done_q.first != NULL) {
17644 17644 ql_done(done_q.first);
17645 17645 }
17646 17646 }
17647 17647 return (rval);
17648 17648 }
17649 17649
17650 17650 /*
17651 17651 * Compare two WWNs. The NAA is omitted for comparison.
17652 17652 *
17653 17653 * Note particularly that the indentation used in this
17654 17654 * function isn't according to Sun recommendations. It
17655 17655 * is indented to make reading a bit easy.
17656 17656 *
17657 17657 * Return Values:
17658 17658 * if first == second return 0
17659 17659 * if first > second return 1
17660 17660 * if first < second return -1
17661 17661 */
17662 17662 int
17663 17663 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17664 17664 {
17665 17665 la_wwn_t t1, t2;
17666 17666 int rval;
17667 17667
17668 17668 EL(ha, "WWPN=%08x%08x\n",
17669 17669 BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17670 17670 EL(ha, "WWPN=%08x%08x\n",
17671 17671 BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17672 17672 /*
17673 17673 * Fibre Channel protocol is big endian, so compare
17674 17674 * as big endian values
17675 17675 */
17676 17676 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17677 17677 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17678 17678
17679 17679 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17680 17680 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17681 17681
17682 17682 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17683 17683 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17684 17684 rval = 0;
17685 17685 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17686 17686 rval = 1;
17687 17687 } else {
17688 17688 rval = -1;
17689 17689 }
17690 17690 } else {
17691 17691 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17692 17692 rval = 1;
17693 17693 } else {
17694 17694 rval = -1;
17695 17695 }
17696 17696 }
17697 17697 return (rval);
17698 17698 }
17699 17699
17700 17700 /*
17701 17701 * ql_wait_for_td_stop
17702 17702 * Wait for task daemon to stop running. Internal command timeout
17703 17703 * is approximately 30 seconds, so it may help in some corner
17704 17704 * cases to wait that long
17705 17705 *
17706 17706 * Input:
17707 17707 * ha = adapter state pointer.
17708 17708 *
17709 17709 * Returns:
17710 17710 * DDI_SUCCESS or DDI_FAILURE.
17711 17711 *
17712 17712 * Context:
17713 17713 * Kernel context.
17714 17714 */
17715 17715
17716 17716 static int
17717 17717 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17718 17718 {
17719 17719 int rval = DDI_FAILURE;
17720 17720 UINT16 wait_cnt;
17721 17721
17722 17722 for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17723 17723 /* The task daemon clears the stop flag on exit. */
17724 17724 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17725 17725 if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17726 17726 ddi_in_panic()) {
17727 17727 drv_usecwait(10000);
17728 17728 } else {
17729 17729 delay(drv_usectohz(10000));
17730 17730 }
17731 17731 } else {
17732 17732 rval = DDI_SUCCESS;
17733 17733 break;
17734 17734 }
17735 17735 }
17736 17736 return (rval);
17737 17737 }
17738 17738
17739 17739 /*
17740 17740 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17741 17741 *
17742 17742 * Input: Pointer to the adapter state structure.
17743 17743 * Returns: Success or Failure.
17744 17744 * Context: Kernel context.
17745 17745 */
17746 17746 int
17747 17747 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17748 17748 {
17749 17749 int rval = DDI_SUCCESS;
17750 17750
17751 17751 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17752 17752
17753 17753 ha->nvram_cache =
17754 17754 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17755 17755 KM_SLEEP);
17756 17756
17757 17757 if (ha->nvram_cache == NULL) {
17758 17758 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17759 17759 " descriptor", QL_NAME, ha->instance);
17760 17760 rval = DDI_FAILURE;
17761 17761 } else {
17762 17762 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17763 17763 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17764 17764 } else {
17765 17765 ha->nvram_cache->size = sizeof (nvram_t);
17766 17766 }
17767 17767 ha->nvram_cache->cache =
17768 17768 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17769 17769 if (ha->nvram_cache->cache == NULL) {
17770 17770 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17771 17771 QL_NAME, ha->instance);
17772 17772 kmem_free(ha->nvram_cache,
17773 17773 sizeof (nvram_cache_desc_t));
17774 17774 ha->nvram_cache = 0;
17775 17775 rval = DDI_FAILURE;
17776 17776 } else {
17777 17777 mutex_init(&ha->nvram_cache->mutex, NULL,
17778 17778 MUTEX_DRIVER, NULL);
17779 17779 ha->nvram_cache->valid = 0;
17780 17780 }
17781 17781 }
17782 17782
17783 17783 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17784 17784
17785 17785 return (rval);
17786 17786 }
17787 17787
17788 17788 /*
17789 17789 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17790 17790 *
17791 17791 * Input: Pointer to the adapter state structure.
17792 17792 * Returns: Success or Failure.
17793 17793 * Context: Kernel context.
17794 17794 */
17795 17795 int
17796 17796 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17797 17797 {
17798 17798 int rval = DDI_SUCCESS;
17799 17799
17800 17800 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17801 17801
17802 17802 if (ha->nvram_cache == NULL) {
17803 17803 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17804 17804 QL_NAME, ha->instance);
17805 17805 rval = DDI_FAILURE;
17806 17806 } else {
17807 17807 if (ha->nvram_cache->cache != NULL) {
17808 17808 kmem_free(ha->nvram_cache->cache,
17809 17809 ha->nvram_cache->size);
17810 17810 }
17811 17811 mutex_destroy(&ha->nvram_cache->mutex);
17812 17812 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17813 17813 }
17814 17814
17815 17815 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17816 17816
17817 17817 return (rval);
17818 17818 }
17819 17819
17820 17820 /*
17821 17821 * ql_process_idc_event - Handle an Inter-Driver Communication async event.
17822 17822 *
17823 17823 * Input: Pointer to the adapter state structure.
17824 17824 * Returns: void
17825 17825 * Context: Kernel context.
17826 17826 */
17827 17827 static void
17828 17828 ql_process_idc_event(ql_adapter_state_t *ha)
17829 17829 {
17830 17830 int rval;
17831 17831
17832 17832 switch (ha->idc_mb[0]) {
17833 17833 case MBA_IDC_NOTIFICATION:
17834 17834 /*
17835 17835 * The informational opcode (idc_mb[2]) can be a
17836 17836 * defined value or the mailbox command being executed
17837 17837 * on another function which stimulated this IDC message.
17838 17838 */
17839 17839 ADAPTER_STATE_LOCK(ha);
17840 17840 switch (ha->idc_mb[2]) {
17841 17841 case IDC_OPC_DRV_START:
17842 17842 if (ha->idc_flash_acc != 0) {
17843 17843 ha->idc_flash_acc--;
17844 17844 if (ha->idc_flash_acc == 0) {
17845 17845 ha->idc_flash_acc_timer = 0;
17846 17846 GLOBAL_HW_UNLOCK();
17847 17847 }
17848 17848 }
17849 17849 if (ha->idc_restart_cnt != 0) {
17850 17850 ha->idc_restart_cnt--;
17851 17851 if (ha->idc_restart_cnt == 0) {
17852 17852 ha->idc_restart_timer = 0;
17853 17853 ADAPTER_STATE_UNLOCK(ha);
17854 17854 TASK_DAEMON_LOCK(ha);
17855 17855 ha->task_daemon_flags &= ~DRIVER_STALL;
17856 17856 TASK_DAEMON_UNLOCK(ha);
17857 17857 ql_restart_queues(ha);
17858 17858 } else {
17859 17859 ADAPTER_STATE_UNLOCK(ha);
17860 17860 }
17861 17861 } else {
17862 17862 ADAPTER_STATE_UNLOCK(ha);
17863 17863 }
17864 17864 break;
17865 17865 case IDC_OPC_FLASH_ACC:
17866 17866 ha->idc_flash_acc_timer = 30;
17867 17867 if (ha->idc_flash_acc == 0) {
17868 17868 GLOBAL_HW_LOCK();
17869 17869 }
17870 17870 ha->idc_flash_acc++;
17871 17871 ADAPTER_STATE_UNLOCK(ha);
17872 17872 break;
17873 17873 case IDC_OPC_RESTART_MPI:
17874 17874 ha->idc_restart_timer = 30;
17875 17875 ha->idc_restart_cnt++;
17876 17876 ADAPTER_STATE_UNLOCK(ha);
17877 17877 TASK_DAEMON_LOCK(ha);
17878 17878 ha->task_daemon_flags |= DRIVER_STALL;
17879 17879 TASK_DAEMON_UNLOCK(ha);
17880 17880 break;
17881 17881 case IDC_OPC_PORT_RESET_MBC:
17882 17882 case IDC_OPC_SET_PORT_CONFIG_MBC:
17883 17883 ha->idc_restart_timer = 30;
17884 17884 ha->idc_restart_cnt++;
17885 17885 ADAPTER_STATE_UNLOCK(ha);
17886 17886 TASK_DAEMON_LOCK(ha);
17887 17887 ha->task_daemon_flags |= DRIVER_STALL;
17888 17888 TASK_DAEMON_UNLOCK(ha);
17889 17889 (void) ql_wait_outstanding(ha);
17890 17890 break;
17891 17891 default:
17892 17892 ADAPTER_STATE_UNLOCK(ha);
17893 17893 EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17894 17894 ha->idc_mb[2]);
17895 17895 break;
17896 17896 }
17897 17897 /*
17898 17898 * If there is a timeout value associated with this IDC
17899 17899 * notification then there is an implied requirement
17900 17900 * that we return an ACK.
17901 17901 */
17902 17902 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17903 17903 rval = ql_idc_ack(ha);
17904 17904 if (rval != QL_SUCCESS) {
17905 17905 EL(ha, "idc_ack status=%xh %xh\n", rval,
17906 17906 ha->idc_mb[2]);
17907 17907 }
17908 17908 }
17909 17909 break;
17910 17910 case MBA_IDC_COMPLETE:
17911 17911 /*
17912 17912 * We don't ACK completions, only these require action.
17913 17913 */
17914 17914 switch (ha->idc_mb[2]) {
17915 17915 case IDC_OPC_PORT_RESET_MBC:
17916 17916 case IDC_OPC_SET_PORT_CONFIG_MBC:
17917 17917 ADAPTER_STATE_LOCK(ha);
17918 17918 if (ha->idc_restart_cnt != 0) {
17919 17919 ha->idc_restart_cnt--;
17920 17920 if (ha->idc_restart_cnt == 0) {
17921 17921 ha->idc_restart_timer = 0;
17922 17922 ADAPTER_STATE_UNLOCK(ha);
17923 17923 TASK_DAEMON_LOCK(ha);
17924 17924 ha->task_daemon_flags &= ~DRIVER_STALL;
17925 17925 TASK_DAEMON_UNLOCK(ha);
17926 17926 ql_restart_queues(ha);
17927 17927 } else {
17928 17928 ADAPTER_STATE_UNLOCK(ha);
17929 17929 }
17930 17930 } else {
17931 17931 ADAPTER_STATE_UNLOCK(ha);
17932 17932 }
17933 17933 break;
17934 17934 default:
17935 17935 break; /* Don't care... */
17936 17936 }
17937 17937 break;
17938 17938 case MBA_IDC_TIME_EXTENDED:
17939 17939 QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17940 17940 "%xh\n", ha->instance, ha->idc_mb[2]);
17941 17941 break;
17942 17942 default:
17943 17943 EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
17944 17944 ha->idc_mb[2]);
17945 17945 ADAPTER_STATE_UNLOCK(ha);
17946 17946 break;
17947 17947 }
17948 17948 }
↓ open down ↓ |
2031 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX