[ewg] [PATCH] Add backports of iSCSI and iSER for OFED 1.4
Doron Shoham
dorons at Voltaire.COM
Sun Aug 31 03:32:17 PDT 2008
Add backports of iSCSI and iSER for OFED 1.4.

Add a backport include/scsi/scsi.h under kernel_addons for the
2.6.16_sles10, 2.6.16_sles10_sp1, 2.6.16_sles10_sp2, 2.6.18-EL5.1,
2.6.18-EL5.2 and 2.6.18_FC6 trees; add iscsi backport patches that sync
the kernel code with release 2.0-869.2 and iser patches that sync with
2.6.26; rename the misspelled iscsi_03_copmat_* patches to
iscsi_03_compat_*; and add new iscsi/iser backport patch series for the
2.6.9_U4 through 2.6.9_U7 trees.

Signed-off-by: Doron Shoham <dorons at voltaire.com>
---
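Note: for reference, the backport header added under each
kernel_addons/backport/<tree>/include/scsi/scsi.h is the same seven-line
wrapper (the identical file is added for all six kernel trees; see the
diff below):

    #ifndef SCSI_SCSI_H_BACKPORT
    #define SCSI_SCSI_H_BACKPORT

    #include_next <scsi/scsi.h>

    #define SCSI_MAX_VARLEN_CDB_SIZE 260
    #endif

It pulls in the distribution's own scsi/scsi.h via #include_next and
supplies a SCSI_MAX_VARLEN_CDB_SIZE definition for kernels that do not
provide one; the rest of the series consists of the per-tree iscsi/iser
backport patches listed in the diffstat below.
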
.../backport/2.6.16_sles10/include/scsi/scsi.h | 7 +
.../backport/2.6.16_sles10_sp1/include/scsi/scsi.h | 7 +
.../backport/2.6.16_sles10_sp2/include/scsi/scsi.h | 7 +
.../backport/2.6.18-EL5.1/include/scsi/scsi.h | 7 +
.../backport/2.6.18-EL5.2/include/scsi/scsi.h | 7 +
.../backport/2.6.18_FC6/include/scsi/scsi.h | 7 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../backport/2.6.16_sles10/iser_02_fix_iscsi_if.h | 145 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../2.6.16_sles10_sp1/iser_02_fix_iscsi_if.h | 145 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../2.6.16_sles10_sp2/iser_02_fix_iscsi_if.h | 145 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../backport/2.6.18-EL5.1/iser_02_fix_iscsi_if.h | 145 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../backport/2.6.18-EL5.2/iser_02_fix_iscsi_if.h | 145 +
...1_sync_kernel_code_with_release_2.0-869.2.patch | 4746 ++++++++++
.../iscsi_02_count_fmr_align_violations.patch | 24 +
...scsi_03_compat_patch_for_RHEL5_and_SLES10.patch | 151 +
...scsi_03_copmat_patch_for_RHEL5_and_SLES10.patch | 151 -
.../iser_01_sync_kernel_code_with_2.6.26.patch | 1549 ++++
.../backport/2.6.18_FC6/iser_02_fix_iscsi_if.h | 145 +
...iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch | 9402 ++++++++++++++++++++
.../backport/2.6.9_U4/iscsi_02_add_to_2_6_9.patch | 180 +
.../2.6.9_U4/iscsi_03_add_session_wq.patch | 76 +
.../2.6.9_U4/iscsi_04_inet_sock_to_opt.patch | 13 +
.../iscsi_05_release_host_lock_before_eh.patch | 60 +
.../backport/2.6.9_U4/iscsi_06_scsi_addons.patch | 75 +
.../iser_00_sync_kernel_code_with_2.6.26.patch | 1549 ++++
..._da9c0c770e775e655e3f77c96d91ee557b117adb.patch | 44 +
..._d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch | 12 +
..._1548271ece9e9312fd5feb41fd58773b56a71d39.patch | 74 +
..._77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch | 38 +
..._b2c6416736b847b91950bd43cc5153e11a1f83ee.patch | 18 +
..._857ae0bdb72999936a28ce621e38e2e288c485da.patch | 16 +
..._8ad5781ae9702a8f95cfdf30967752e4297613ee.patch | 14 +
..._0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch | 22 +
.../2.6.9_U4/iser_09_fix_inclusion_order.patch | 13 +
.../iser_10_fix_struct_scsi_host_template.patch | 31 +
.../2.6.9_U4/iser_11_add_fmr_unalign_cnt.patch | 25 +
.../backport/2.6.9_U4/iser_12_remove_hdr_max.patch | 25 +
.../iser_13_fix_netlink_kernel_create.patch | 26 +
...4_sync_attribute_container.c_from_ofed1.3.patch | 394 +
.../iser_15_fix_iscsi_free_mgmt_task.patch | 28 +
...iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch | 9402 ++++++++++++++++++++
.../backport/2.6.9_U5/iscsi_02_add_to_2_6_9.patch | 180 +
.../2.6.9_U5/iscsi_03_add_session_wq.patch | 76 +
.../2.6.9_U5/iscsi_04_inet_sock_to_opt.patch | 13 +
.../iscsi_05_release_host_lock_before_eh.patch | 60 +
.../backport/2.6.9_U5/iscsi_06_scsi_addons.patch | 75 +
.../iser_00_sync_kernel_code_with_2.6.26.patch | 1549 ++++
..._da9c0c770e775e655e3f77c96d91ee557b117adb.patch | 44 +
..._d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch | 12 +
..._1548271ece9e9312fd5feb41fd58773b56a71d39.patch | 74 +
..._77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch | 38 +
..._b2c6416736b847b91950bd43cc5153e11a1f83ee.patch | 18 +
..._857ae0bdb72999936a28ce621e38e2e288c485da.patch | 16 +
..._8ad5781ae9702a8f95cfdf30967752e4297613ee.patch | 14 +
..._0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch | 22 +
.../2.6.9_U5/iser_09_fix_inclusion_order.patch | 13 +
.../iser_10_fix_struct_scsi_host_template.patch | 31 +
.../2.6.9_U5/iser_11_add_fmr_unalign_cnt.patch | 25 +
.../backport/2.6.9_U5/iser_12_remove_hdr_max.patch | 25 +
.../iser_13_fix_netlink_kernel_create.patch | 26 +
...4_sync_attribute_container.c_from_ofed1.3.patch | 394 +
.../iser_15_fix_iscsi_free_mgmt_task.patch | 28 +
...iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch | 9402 ++++++++++++++++++++
.../backport/2.6.9_U6/iscsi_02_add_to_2_6_9.patch | 180 +
.../2.6.9_U6/iscsi_03_add_session_wq.patch | 76 +
.../2.6.9_U6/iscsi_04_inet_sock_to_opt.patch | 13 +
.../iscsi_05_release_host_lock_before_eh.patch | 60 +
.../backport/2.6.9_U6/iscsi_06_scsi_addons.patch | 75 +
.../iser_00_sync_kernel_code_with_2.6.26.patch | 1549 ++++
..._da9c0c770e775e655e3f77c96d91ee557b117adb.patch | 44 +
..._d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch | 12 +
..._1548271ece9e9312fd5feb41fd58773b56a71d39.patch | 74 +
..._77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch | 38 +
..._b2c6416736b847b91950bd43cc5153e11a1f83ee.patch | 18 +
..._857ae0bdb72999936a28ce621e38e2e288c485da.patch | 16 +
..._8ad5781ae9702a8f95cfdf30967752e4297613ee.patch | 14 +
..._0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch | 22 +
.../2.6.9_U6/iser_09_fix_inclusion_order.patch | 13 +
.../iser_10_fix_struct_scsi_host_template.patch | 31 +
.../2.6.9_U6/iser_11_add_fmr_unalign_cnt.patch | 25 +
.../backport/2.6.9_U6/iser_12_remove_hdr_max.patch | 25 +
.../iser_13_fix_netlink_kernel_create.patch | 26 +
...4_sync_attribute_container.c_from_ofed1.3.patch | 394 +
.../iser_15_fix_iscsi_free_mgmt_task.patch | 28 +
...iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch | 9402 ++++++++++++++++++++
.../backport/2.6.9_U7/iscsi_02_add_to_2_6_9.patch | 180 +
.../2.6.9_U7/iscsi_03_add_session_wq.patch | 76 +
.../2.6.9_U7/iscsi_04_inet_sock_to_opt.patch | 13 +
.../iscsi_05_release_host_lock_before_eh.patch | 60 +
.../backport/2.6.9_U7/iscsi_06_scsi_addons.patch | 75 +
.../iser_00_sync_kernel_code_with_2.6.26.patch | 1549 ++++
..._da9c0c770e775e655e3f77c96d91ee557b117adb.patch | 44 +
..._d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch | 12 +
..._1548271ece9e9312fd5feb41fd58773b56a71d39.patch | 74 +
..._77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch | 38 +
..._b2c6416736b847b91950bd43cc5153e11a1f83ee.patch | 18 +
..._857ae0bdb72999936a28ce621e38e2e288c485da.patch | 16 +
..._8ad5781ae9702a8f95cfdf30967752e4297613ee.patch | 14 +
..._0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch | 22 +
.../2.6.9_U7/iser_09_fix_inclusion_order.patch | 13 +
.../iser_10_fix_struct_scsi_host_template.patch | 31 +
.../2.6.9_U7/iser_11_add_fmr_unalign_cnt.patch | 25 +
.../backport/2.6.9_U7/iser_12_remove_hdr_max.patch | 25 +
.../iser_13_fix_netlink_kernel_create.patch | 26 +
...4_sync_attribute_container.c_from_ofed1.3.patch | 394 +
.../iser_15_fix_iscsi_free_mgmt_task.patch | 28 +
130 files changed, 88272 insertions(+), 906 deletions(-)
create mode 100644 kernel_addons/backport/2.6.16_sles10/include/scsi/scsi.h
create mode 100644 kernel_addons/backport/2.6.16_sles10_sp1/include/scsi/scsi.h
create mode 100644 kernel_addons/backport/2.6.16_sles10_sp2/include/scsi/scsi.h
create mode 100644 kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi.h
create mode 100644 kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi.h
create mode 100644 kernel_addons/backport/2.6.18_FC6/include/scsi/scsi.h
create mode 100644 kernel_patches/backport/2.6.16_sles10/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.16_sles10/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp1/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.16_sles10_sp2/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.18-EL5.1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.1/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.18-EL5.1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.1/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.1/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.18-EL5.2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.2/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.18-EL5.2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.2/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.18-EL5.2/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.18_FC6/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
create mode 100644 kernel_patches/backport/2.6.18_FC6/iscsi_02_count_fmr_align_violations.patch
create mode 100644 kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
delete mode 100644 kernel_patches/backport/2.6.18_FC6/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
create mode 100644 kernel_patches/backport/2.6.18_FC6/iser_01_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.18_FC6/iser_02_fix_iscsi_if.h
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_02_add_to_2_6_9.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_03_add_session_wq.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_04_inet_sock_to_opt.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_05_release_host_lock_before_eh.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iscsi_06_scsi_addons.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_00_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_09_fix_inclusion_order.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_10_fix_struct_scsi_host_template.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_11_add_fmr_unalign_cnt.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_12_remove_hdr_max.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_13_fix_netlink_kernel_create.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_14_sync_attribute_container.c_from_ofed1.3.patch
create mode 100644 kernel_patches/backport/2.6.9_U4/iser_15_fix_iscsi_free_mgmt_task.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_02_add_to_2_6_9.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_03_add_session_wq.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_04_inet_sock_to_opt.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_05_release_host_lock_before_eh.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iscsi_06_scsi_addons.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_00_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_09_fix_inclusion_order.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_10_fix_struct_scsi_host_template.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_11_add_fmr_unalign_cnt.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_12_remove_hdr_max.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_13_fix_netlink_kernel_create.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_14_sync_attribute_container.c_from_ofed1.3.patch
create mode 100644 kernel_patches/backport/2.6.9_U5/iser_15_fix_iscsi_free_mgmt_task.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_02_add_to_2_6_9.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_03_add_session_wq.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_04_inet_sock_to_opt.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_05_release_host_lock_before_eh.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iscsi_06_scsi_addons.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_00_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_09_fix_inclusion_order.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_10_fix_struct_scsi_host_template.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_11_add_fmr_unalign_cnt.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_12_remove_hdr_max.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_13_fix_netlink_kernel_create.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_14_sync_attribute_container.c_from_ofed1.3.patch
create mode 100644 kernel_patches/backport/2.6.9_U6/iser_15_fix_iscsi_free_mgmt_task.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_02_add_to_2_6_9.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_03_add_session_wq.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_04_inet_sock_to_opt.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_05_release_host_lock_before_eh.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iscsi_06_scsi_addons.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_00_sync_kernel_code_with_2.6.26.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_09_fix_inclusion_order.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_10_fix_struct_scsi_host_template.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_11_add_fmr_unalign_cnt.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_12_remove_hdr_max.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_13_fix_netlink_kernel_create.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_14_sync_attribute_container.c_from_ofed1.3.patch
create mode 100644 kernel_patches/backport/2.6.9_U7/iser_15_fix_iscsi_free_mgmt_task.patch
diff --git a/kernel_addons/backport/2.6.16_sles10/include/scsi/scsi.h b/kernel_addons/backport/2.6.16_sles10/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.16_sles10/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_addons/backport/2.6.16_sles10_sp1/include/scsi/scsi.h b/kernel_addons/backport/2.6.16_sles10_sp1/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.16_sles10_sp1/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_addons/backport/2.6.16_sles10_sp2/include/scsi/scsi.h b/kernel_addons/backport/2.6.16_sles10_sp2/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.16_sles10_sp2/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi.h b/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.18-EL5.1/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi.h b/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.18-EL5.2/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_addons/backport/2.6.18_FC6/include/scsi/scsi.h b/kernel_addons/backport/2.6.18_FC6/include/scsi/scsi.h
new file mode 100644
index 0000000..a4d7176
--- /dev/null
+++ b/kernel_addons/backport/2.6.18_FC6/include/scsi/scsi.h
@@ -0,0 +1,7 @@
+#ifndef SCSI_SCSI_H_BACKPORT
+#define SCSI_SCSI_H_BACKPORT
+
+#include_next <scsi/scsi.h>
+
+#define SCSI_MAX_VARLEN_CDB_SIZE 260
+#endif
diff --git a/kernel_patches/backport/2.6.16_sles10/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.16_sles10/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted succesfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
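+
Before the header changes below, it is worth spelling out what the reverted iscsi_tcp.c interface amounts to: the LLD no longer allocates its own Scsi_Host, it publishes its SCSI host template, per-connection data size and CDB limit in struct iscsi_transport, and libiscsi creates the host inside iscsi_session_setup() (see the libiscsi.c hunks further down). A minimal sketch of that registration shape, restricted to fields that actually appear in the hunk above; the my_* names are placeholders for illustration only, not code from this patch:

    #include <linux/module.h>
    #include <scsi/scsi_host.h>
    #include <scsi/scsi_transport_iscsi.h>
    #include <scsi/libiscsi.h>

    static struct scsi_host_template my_sht;	/* the LLD's host template */

    static struct iscsi_transport my_transport = {
    	.owner		= THIS_MODULE,
    	.name		= "my-lld",
    	.host_template	= &my_sht,	/* libiscsi allocates the Scsi_Host from this */
    	.conndata_size	= sizeof(struct iscsi_conn),
    	.max_conn	= 1,
    	.max_cmd_len	= 16,
    	/* .create_session, .init_cmd_task, ... hooks omitted in this sketch */
    };

    static int __init my_lld_init(void)
    {
    	/* returns the scsi_transport_template, NULL on failure (as used above) */
    	if (!iscsi_register_transport(&my_transport))
    		return -ENODEV;
    	return 0;
    }
    module_init(my_lld_init);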
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
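+
Similarly for the libiscsi.c diff that follows: it restores the split command/management task pools, so iscsi_session_setup() allocates cmds_max iscsi_cmd_task entries plus ISCSI_MGMT_CMDS_MAX iscsi_mgmt_task entries, each oversized by the LLD's private data size with dd_data pointing at the trailing bytes, and __iscsi_complete_pdu() again dispatches on the ITT range (below cmds_max for SCSI responses, from ISCSI_MGMT_ITT_OFFSET for management responses, ~0U for unsolicited PDUs). The iscsi_tcp.h hunk above supplies the TCP-private structures that live in those trailing bytes. A rough illustration of the dd_data convention only; the helpers below are hypothetical and mirror what iscsi_tcp_session_create does in the first hunk, they are not part of the patch:

    #include <scsi/libiscsi.h>
    #include "iscsi_tcp.h"	/* driver-private header patched above */

    /* Each cmd pool element is laid out as
     *   [ struct iscsi_cmd_task | struct iscsi_tcp_cmd_task ]
     * so the LLD reaches its area through dd_data and points the generic
     * ctask->hdr at the BHS it keeps there. */
    static void my_wire_up_ctask(struct iscsi_cmd_task *ctask)
    {
    	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

    	ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
    	ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
    }

    /* management tasks get the same treatment via struct iscsi_tcp_mgmt_task */
    static void my_wire_up_mtask(struct iscsi_mgmt_task *mtask)
    {
    	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

    	mtask->hdr = (struct iscsi_hdr *)&tcp_mtask->hdr;
    }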
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to aviod starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
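For reference, the hunks above put back the pre-2.6.26 iscsi_transport template
(host_template/conndata_size fields, split cmd/mgmt task callbacks, and uint64_t
endpoint handles instead of struct iscsi_endpoint). A minimal sketch of an LLD
filling the backported template (the my_* names are placeholders, not part of
this patch):

    /* hypothetical callbacks, matching the backported prototypes */
    static int  my_init_cmd_task(struct iscsi_cmd_task *ctask);
    static int  my_xmit_cmd_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask);
    static void my_cleanup_cmd_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask);
    static int  my_xmit_mgmt_task(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask);
    static int  my_ep_connect(struct sockaddr *dst_addr, int non_blocking, uint64_t *ep_handle);
    static int  my_ep_poll(uint64_t ep_handle, int timeout_ms);
    static void my_ep_disconnect(uint64_t ep_handle);
    static struct scsi_host_template my_sht;

    static struct iscsi_transport my_transport = {
            .owner            = THIS_MODULE,
            .name             = "my_lld",
            .host_template    = &my_sht,              /* field re-added by this backport */
            .conndata_size    = sizeof(struct iscsi_conn),
            .max_lun          = 256,
            .max_cmd_len      = 16,
            .init_cmd_task    = my_init_cmd_task,     /* was .init_task in 2.6.26 */
            .xmit_cmd_task    = my_xmit_cmd_task,     /* was .xmit_task */
            .cleanup_cmd_task = my_cleanup_cmd_task,  /* was .cleanup_task */
            .xmit_mgmt_task   = my_xmit_mgmt_task,    /* mgmt PDUs get their own hook again */
            .ep_connect       = my_ep_connect,        /* ep_* use a raw 64-bit handle */
            .ep_poll          = my_ep_poll,
            .ep_disconnect    = my_ep_disconnect,
    };
    /* registration itself is unchanged: iscsi_register_transport(&my_transport); */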
diff --git a/kernel_patches/backport/2.6.16_sles10/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.16_sles10/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
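The new fmr_unalign_cnt field sits in the "custom statistics" block of struct
iscsi_conn, so a ULP exports it through the custom[] slots of struct iscsi_stats
in its get_stats() callback (iser does exactly that later in this series). A
sketch, with hypothetical my_* names:

    static void my_count_unaligned(struct iscsi_conn *conn, int aligned)
    {
            if (!aligned)
                    conn->fmr_unalign_cnt++;        /* counter added by this patch */
    }

    static void my_get_stats(struct iscsi_cls_conn *cls_conn,
                             struct iscsi_stats *stats)
    {
            struct iscsi_conn *conn = cls_conn->dd_data;

            stats->custom_length = 1;
            strcpy(stats->custom[0].desc, "fmr_unalign_cnt");
            stats->custom[0].value = conn->fmr_unalign_cnt;
    }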
diff --git a/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
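The key compat point above is the netlink receive path: before 2.6.24 the
kernel calls the netlink "input" callback with the socket and a length, and the
callback has to drain sk->sk_receive_queue itself, which is why iscsi_if_rx()
grows the outer dequeue loop and the uid check. The shape of such a callback,
as a standalone sketch (old_style_rx is a placeholder name):

    #include <linux/skbuff.h>
    #include <linux/netlink.h>
    #include <net/sock.h>

    static void old_style_rx(struct sock *sk, int len)
    {
            struct sk_buff *skb;

            while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                    if (NETLINK_CREDS(skb)->uid) {
                            /* root-only interface: drop non-root senders */
                            kfree_skb(skb);
                            continue;
                    }
                    /* walk the nlmsghdrs in skb as iscsi_if_rx() does ... */
                    kfree_skb(skb);
            }
    }

The LINUX_VERSION_CODE guard around the scan_finished check serves the same
purpose at compile time: the scan_finished hook does not exist on the 2.6.16
SLES10 base, so the scan work queueing is compiled in only on kernels newer
than 2.6.19.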
diff --git a/kernel_patches/backport/2.6.16_sles10/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.16_sles10/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.16_sles10/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.16_sles10/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there is a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.16_sles10/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags present iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted successfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to aviod starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task:	Initialize an iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task:	Initialize an iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or the
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
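This hunk restores the pre-2.6.26 transport callbacks: command and management tasks get separate handlers, and endpoints are plain uint64_t handles rather than struct iscsi_endpoint pointers. A minimal sketch of an ops table written against that backported interface (all my_* symbols are hypothetical stand-ins; the real user is the iscsi_iser_transport registration further down in this series):

#include <linux/module.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/* Hypothetical no-op handlers; a real LLD does its work here. */
static int my_init_cmd_task(struct iscsi_cmd_task *ctask) { return 0; }
static void my_init_mgmt_task(struct iscsi_conn *conn,
			      struct iscsi_mgmt_task *mtask) { }
static int my_xmit_cmd_task(struct iscsi_conn *conn,
			    struct iscsi_cmd_task *ctask) { return 0; }
static void my_cleanup_cmd_task(struct iscsi_conn *conn,
				struct iscsi_cmd_task *ctask) { }
static int my_xmit_mgmt_task(struct iscsi_conn *conn,
			     struct iscsi_mgmt_task *mtask) { return 0; }
static int my_ep_connect(struct sockaddr *dst_addr, int non_blocking,
			 uint64_t *ep_handle) { *ep_handle = 0; return 0; }
static int my_ep_poll(uint64_t ep_handle, int timeout_ms) { return 1; }
static void my_ep_disconnect(uint64_t ep_handle) { }

static struct iscsi_transport my_transport = {
	.owner            = THIS_MODULE,
	.name             = "my_transport",
	/* split per-task callbacks, as in the backported header above */
	.init_cmd_task    = my_init_cmd_task,
	.init_mgmt_task   = my_init_mgmt_task,
	.xmit_cmd_task    = my_xmit_cmd_task,
	.cleanup_cmd_task = my_cleanup_cmd_task,
	.xmit_mgmt_task   = my_xmit_mgmt_task,
	/* endpoint management by opaque 64-bit handle */
	.ep_connect       = my_ep_connect,
	.ep_poll          = my_ep_poll,
	.ep_disconnect    = my_ep_disconnect,
};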
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
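The new fmr_unalign_cnt field is reported through the transport's custom statistics. As a rough illustration of the reporting side (a sketch only; the iser driver's iscsi_iser_conn_get_stats() later in this series exports the same counter as custom slot 3):

#include <linux/string.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/* Sketch: surface the per-session alignment-violation counter to userspace. */
static void my_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			      struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "fmr_unalign_cnt");
	stats->custom[0].value = conn->fmr_unalign_cnt;
}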
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
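The largest hunk above reshapes iscsi_if_rx() back into the old-style netlink input callback, which receives the socket rather than a ready sk_buff and must drain the receive queue itself. A stripped-down sketch of that callback shape (hypothetical handler, mirroring the loop in the hunk):

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Old-style netlink input callback: drain sk_receive_queue ourselves. */
static void my_nl_input(struct sock *sk, int len)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* root-only control channel: drop unprivileged senders */
		if (NETLINK_CREDS(skb)->uid) {
			kfree_skb(skb);
			continue;
		}
		/* ... parse the netlink messages in skb and reply ... */
		kfree_skb(skb);
	}
}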
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.16_sles10_sp1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.16_sles10_sp1/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10_sp1/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.16_sles10_sp1/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp1/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted succesfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to aviod starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
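The header hunk above restores the 2.0-869 transport interface: per-connection allocation sizes live in the template, command and management tasks get separate init/xmit/cleanup hooks, and the ep_* callbacks take raw 64-bit handles instead of struct iscsi_endpoint pointers. As a rough sketch of what that interface looks like from an LLD's side (the "foo" driver and every foo_* symbol are hypothetical; only the field names and callback signatures are taken from the backported header above):

#include <linux/module.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/*
 * Hypothetical LLD callbacks, implemented elsewhere in the driver; only the
 * prototypes matter here, and they mirror the backported signatures above.
 */
struct iscsi_cls_session *foo_create_session(struct iscsi_transport *it,
		struct scsi_transport_template *t, uint16_t cmds_max,
		uint16_t qdepth, uint32_t initial_cmdsn, uint32_t *hostno);
int foo_xmit_cmd_task(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask);
int foo_xmit_mgmt_task(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask);
int foo_ep_connect(struct sockaddr *dst_addr, int non_blocking,
		   uint64_t *ep_handle);
int foo_ep_poll(uint64_t ep_handle, int timeout_ms);
void foo_ep_disconnect(uint64_t ep_handle);

static struct scsi_host_template foo_sht;	/* contents omitted */

static struct iscsi_transport foo_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.host_template	= &foo_sht,
	/* iscsi_create_conn() allocates this much dd_data per connection */
	.conndata_size	= sizeof(struct iscsi_conn),
	.max_lun	= 512,
	.max_conn	= 1,
	.max_cmd_len	= 16,
	.create_session	= foo_create_session,
	/* cmd and mgmt PDUs go out through separate hooks in this API */
	.xmit_cmd_task	= foo_xmit_cmd_task,
	.xmit_mgmt_task	= foo_xmit_mgmt_task,
	/* endpoints are plain uint64_t handles, not struct iscsi_endpoint */
	.ep_connect	= foo_ep_connect,
	.ep_poll	= foo_ep_poll,
	.ep_disconnect	= foo_ep_disconnect,
};

static struct scsi_transport_template *foo_scsi_transport;

static int __init foo_module_init(void)
{
	foo_scsi_transport = iscsi_register_transport(&foo_iscsi_transport);
	return foo_scsi_transport ? 0 : -ENODEV;
}
module_init(foo_module_init);
MODULE_LICENSE("GPL");
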
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
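The fmr_unalign_cnt field added to struct iscsi_conn above counts commands whose scatter-gather lists are not aligned for a single fast memory registration, i.e. an interior element starts or ends off a page boundary. A self-contained sketch of that kind of alignment test follows; the structure and helper names are made up for the example (assuming 4 KiB pages), and only the counter name matches the patch:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u	/* assume 4 KiB pages for the example */

/* Illustrative stand-ins for an SG element and the per-connection stats;
 * the real structures live in the kernel (struct scatterlist, iscsi_conn). */
struct sg_elem {
	uint64_t addr;		/* DMA address of this element */
	unsigned int length;	/* length in bytes */
};

struct conn_stats {
	uint32_t fmr_unalign_cnt;	/* mirrors iscsi_conn->fmr_unalign_cnt */
};

/*
 * A list is FMR-friendly only if it describes one virtually contiguous
 * region: every element but the first must start on a page boundary and
 * every element but the last must end on one.  Returns 1 and bumps the
 * counter when the list violates that rule.
 */
static int sg_violates_fmr_alignment(const struct sg_elem *sg, int nents,
				     struct conn_stats *stats)
{
	int i;

	for (i = 0; i < nents; i++) {
		int first = (i == 0);
		int last = (i == nents - 1);

		if (!first && (sg[i].addr & (PAGE_SIZE - 1)))
			goto unaligned;
		if (!last && ((sg[i].addr + sg[i].length) & (PAGE_SIZE - 1)))
			goto unaligned;
	}
	return 0;

unaligned:
	stats->fmr_unalign_cnt++;
	return 1;
}

int main(void)
{
	struct sg_elem sg[] = {
		{ .addr = 0x1000, .length = PAGE_SIZE },
		{ .addr = 0x5200, .length = 512 },	/* starts mid-page */
	};
	struct conn_stats stats = { 0 };

	if (sg_violates_fmr_alignment(sg, 2, &stats))
		printf("fmr_unalign_cnt = %u\n",
		       (unsigned int)stats.fmr_unalign_cnt);
	return 0;
}

In the iser data path a violation like this typically forces a copy into a contiguous bounce buffer before registration, which is why it is worth counting per session.
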
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.16_sles10_sp2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.16_sles10_sp2/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.16_sles10_sp2/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.16_sles10_sp2/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.16_sles10_sp2/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
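The iscsi_if.h fix above rolls the header back to what the older open-iscsi
ABI on these kernels expects: the newer parameters (IFACE_NAME, ISID,
INITIATOR_NAME) and the 1ULL bit shifts go away, and the
hostdata_session()/iscsi_hostdata() helpers come back. Those two macros encode
a simple layout: the Scsi_Host hostdata area starts with one unsigned long
used as an opaque handle, and the LLD's session structure follows immediately
after it. A small user-space sketch of that layout follows, reusing the macros
verbatim from the hunk; the fake_session struct and the stored values are made
up purely for illustration, and the void-pointer arithmetic relies on the same
GCC extension the kernel build uses.

#include <stdio.h>
#include <stdlib.h>

#define iscsi_ptr(_handle)          ((void*)(unsigned long)_handle)
#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
#define iscsi_hostdata(_hostdata)   ((void*)_hostdata + sizeof(unsigned long))

struct fake_session {               /* stand-in for the LLD's session struct */
        int id;
};

int main(void)
{
        /* hostdata = one unsigned long (handle slot) + the LLD session */
        void *hostdata = calloc(1, sizeof(unsigned long) +
                                   sizeof(struct fake_session));
        struct fake_session *sess;

        if (!hostdata)
                return 1;
        sess = iscsi_hostdata(hostdata);
        sess->id = 42;
        /* the leading word stores an opaque handle back to the session */
        *(unsigned long *)hostdata = (unsigned long)sess;

        printf("session %p == %p, id %d\n",
               (void *)sess, hostdata_session(hostdata), sess->id);
        free(hostdata);
        return 0;
}

The iscsi backport patch below depends on exactly this layout, e.g.
class_to_transport_session() and iscsi_tcp_host_get_param() both call
iscsi_hostdata(shost->hostdata), which is why the helpers have to be restored
in the header.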
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.18-EL5.1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.1/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted succesfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
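A small caller-side sketch of the contract documented above: the free must happen under the session lock, and the preallocated login_mtask is deliberately skipped so it survives for the next login/text exchange. The helper name is hypothetical and assumes iscsi_free_mgmt_task() is declared in the backported libiscsi.h that accompanies this file:

#include <scsi/libiscsi.h>

/* Hypothetical LLD-side helper: return a finished mgmt task to the
 * session's mgmtpool. iscsi_free_mgmt_task() unlinks the task and puts
 * the pointer back on the kfifo, unless it is the reserved login_mtask. */
static void example_lld_mtask_done(struct iscsi_conn *conn,
                                   struct iscsi_mgmt_task *mtask)
{
        struct iscsi_session *session = conn->session;

        spin_lock_bh(&session->lock);
        iscsi_free_mgmt_task(conn, mtask);
        spin_unlock_bh(&session->lock);
}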
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
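A minimal usage sketch for the backported iscsi_conn_send_pdu() (hypothetical caller): the PDU is copied into an mtask and queued on conn->mgmtqueue under the session lock, and the function now schedules conn->xmitwork itself, so transmission always happens later from the workqueue rather than synchronously. The header setup mirrors what iscsi_send_nopout() does for a locally generated Nop-Out:

#include <linux/string.h>
#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>

static int example_send_nop(struct iscsi_cls_conn *cls_conn)
{
        struct iscsi_nopout hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
        hdr.flags = ISCSI_FLAG_CMD_FINAL;
        hdr.ttt = RESERVED_ITT;         /* unsolicited nop, not a reply */
        /* hdr.itt is left 0; iscsi_prep_mtask() rewrites it via build_itt() */

        return iscsi_conn_send_pdu(cls_conn, (struct iscsi_hdr *)&hdr,
                                   NULL, 0);
}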
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
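The sense handling above depends on the layout of the response PDU's data segment: a 2-byte big-endian sense length followed by the sense bytes. The open-coded be16_to_cpu(get_unaligned(...)) is the point of this hunk, since the 2.6.16/2.6.18 kernels targeted by these backports predate get_unaligned_be16(). A standalone restatement of the parse, with a hypothetical function name:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <scsi/scsi_cmnd.h>

static int example_copy_sense(struct scsi_cmnd *sc, char *data, int datalen)
{
        uint16_t senselen;

        if (datalen < 2)
                return -EINVAL;

        /* first two bytes of the data segment: big-endian sense length */
        senselen = be16_to_cpu(get_unaligned((__be16 *) data));
        if (datalen < senselen)
                return -EINVAL;

        /* the sense bytes follow immediately after the length field */
        memcpy(sc->sense_buffer, data + 2,
               min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
        return 0;
}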
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
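The "age+cid+itt" encoding referred to above packs the session age into the high bits of the on-wire ITT (via build_itt() in iscsi_prep_mtask() and the SCSI command prep path), so a late response from a previous session generation can be rejected. A condensed decode sketch that combines this age check with the index ranges __iscsi_complete_pdu() uses to pick between session->cmds[] and session->mgmt_cmds[]; the function name is hypothetical and the types come from the iSCSI headers shipped with this backport:

#include <scsi/iscsi_proto.h>
#include <scsi/libiscsi.h>

static int example_decode_itt(struct iscsi_session *session, itt_t wire_itt,
                              uint32_t *index)
{
        if (((__force u32)wire_itt & ISCSI_AGE_MASK) !=
            (session->age << ISCSI_AGE_SHIFT))
                return ISCSI_ERR_BAD_ITT;       /* stale session generation */

        *index = get_itt(wire_itt);             /* strip the age bits */

        if (*index < session->cmds_max)
                return 0;                       /* scsi cmd task: session->cmds[] */
        if (*index >= ISCSI_MGMT_ITT_OFFSET &&
            *index < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max)
                return 0;                       /* mgmt task: session->mgmt_cmds[] */
        return ISCSI_ERR_BAD_ITT;
}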
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
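Restating the pin-across-unlock pattern above in one place (illustration only, as it would read inside libiscsi.c where iscsi_complete_command() is visible): the ctask is pinned before the session lock is dropped so a completion racing on another CPU cannot return it to cmdpool while the transport callback still dereferences it.

static int example_xmit_pinned(struct iscsi_conn *conn,
                               struct iscsi_cmd_task *ctask)
{
        int rc;

        atomic_inc(&ctask->refcount);             /* __iscsi_get_ctask() */
        spin_unlock_bh(&conn->session->lock);
        rc = conn->session->tt->xmit_cmd_task(conn, ctask);   /* may block */
        spin_lock_bh(&conn->session->lock);
        if (atomic_dec_and_test(&ctask->refcount))            /* __iscsi_put_ctask() */
                iscsi_complete_command(ctask);    /* last ref: back to cmdpool */
        return rc;
}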
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);

+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to avoid starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
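The diagram above captures the pre-2.6.26 model this backport returns to: the session lives inside the Scsi_Host's hostdata, with one leading word reserved for the class-session pointer. Spelled out as it would read inside libiscsi.c (the accessor names below, other than iscsi_hostdata() and hostdata_session(), are hypothetical):

static struct iscsi_session *example_shost_to_session(struct Scsi_Host *shost)
{
        /* skip the leading "ptr to class sess" word */
        return iscsi_hostdata(shost->hostdata);
}

static struct iscsi_cls_session *example_shost_to_cls(struct Scsi_Host *shost)
{
        /* the first unsigned long of hostdata holds the cls_session pointer */
        return hostdata_session(shost->hostdata);
}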
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
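Compared with the 2.6.26+ API being removed here, session creation collapses back into a single call that allocates the Scsi_Host, both task pools, and the class session. A hedged sketch of how a software transport's session-creation hook would invoke the backported signature; the example_* names and the per-task private structs are hypothetical:

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport.h>

struct example_lld_ctask_data { u32 flags; };   /* hypothetical per-ctask LLD data */
struct example_lld_mtask_data { u32 flags; };   /* hypothetical per-mtask LLD data */

static struct iscsi_cls_session *
example_create_session(struct iscsi_transport *tt,
                       struct scsi_transport_template *scsit,
                       uint16_t cmds_max, uint16_t qdepth,
                       uint32_t initial_cmdsn, uint32_t *hostno)
{
        return iscsi_session_setup(tt, scsit, cmds_max, qdepth,
                                   sizeof(struct example_lld_ctask_data),
                                   sizeof(struct example_lld_mtask_data),
                                   initial_cmdsn, hostno);
}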
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
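The recv_lock handshake added above assumes the usual software-LLD receive-side contract: the LLD publishes its rx lock through conn->recv_lock when the connection is bound, takes it as a reader around inbound PDU processing, and checks suspend_rx, which lets the error-handling paths here quiesce reception by taking the same lock as a writer before setting ISCSI_SUSPEND_BIT. A schematic (and entirely hypothetical) receive routine:

#include <linux/skbuff.h>
#include <scsi/libiscsi.h>

static void example_lld_rx(struct iscsi_conn *conn, struct sk_buff *skb)
{
        read_lock_bh(conn->recv_lock);
        if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)) {
                read_unlock_bh(conn->recv_lock);
                return;         /* connection is being stopped or recovered */
        }
        /* ...parse PDUs from skb and feed them to iscsi_complete_pdu()... */
        read_unlock_bh(conn->recv_lock);
}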
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++ * Becuae LLDs allocate their hdr differently, this is a pointer to
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or the
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.18-EL5.1/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.1/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.18-EL5.1/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.18-EL5.1/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.1/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports: sync kernel code with 2.6.26
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++	/* currently this is the only field which needs to be initialized */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ 	err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.1/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.18-EL5.1/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.1/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.18-EL5.2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.2/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted succesfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to aviod starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or the
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.18-EL5.2/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.2/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.18-EL5.2/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.18-EL5.2/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.2/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which needs to be initialized */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
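A note on the endpoint-handle convention the backported code above reverts to: the pre-2.6.26 transport ABI has no struct iscsi_endpoint, so ep_connect() hands userspace the kernel's struct iser_conn pointer widened to a __u64, and every later use of that handle is validated against the global ig.connlist before it is dereferenced. Below is a minimal illustrative sketch of that round trip, condensed from the hunks above (it assumes the surrounding iscsi_iser.c context, trims error handling, and is not a drop-in replacement):

/* sketch only: mirrors the handle encode/validate flow from the patch above */
static int ep_connect_sketch(struct sockaddr *dst_addr, int non_blocking,
                             __u64 *ep_handle)
{
        struct iser_conn *ib_conn;
        int err;

        err = iser_conn_init(&ib_conn);         /* kzalloc + waitqueue/list init */
        if (err)
                return err;

        err = iser_connect(ib_conn, NULL,
                           (struct sockaddr_in *)dst_addr, non_blocking);
        if (!err)
                /* the "handle" is just the kernel pointer widened to 64 bits */
                *ep_handle = (__u64)(unsigned long)ib_conn;
        return err;
}

static struct iser_conn *ep_to_conn_sketch(__u64 ep_handle)
{
        /* never trust the user-supplied value: use it only if it is still
         * present on ig.connlist (see iscsi_iser_ib_conn_lookup above) */
        return iscsi_iser_ib_conn_lookup(ep_handle);
}

The same lookup guards conn_bind, ep_poll and ep_disconnect, so a stale or bogus handle is rejected rather than dereferenced.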
diff --git a/kernel_patches/backport/2.6.18-EL5.2/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.18-EL5.2/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.18-EL5.2/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
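For context, the two hostdata macros this backport restores encode a simple layout: the first machine word of shost->hostdata holds a pointer-sized session handle, and the LLD-private area begins immediately after that word. A hedged sketch of what that looks like from a driver's point of view (the helper names are invented for illustration):

/* illustrative only: shows the layout implied by hostdata_session()
 * and iscsi_hostdata() as restored in the hunk above */
#include <scsi/scsi_host.h>
#include <scsi/iscsi_if.h>

static void *session_handle_sketch(struct Scsi_Host *shost)
{
        /* word 0 of hostdata: pointer-sized handle to the session */
        return hostdata_session(shost->hostdata);
}

static void *lld_data_sketch(struct Scsi_Host *shost)
{
        /* driver-private session data starts right past that word */
        return iscsi_hostdata(shost->hostdata);
}

The extra word of indirection lets generic transport code find the session from any Scsi_Host without knowing the driver's private structure layout.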
diff --git a/kernel_patches/backport/2.6.18_FC6/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch b/kernel_patches/backport/2.6.18_FC6/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
new file mode 100644
index 0000000..cd24137
--- /dev/null
+++ b/kernel_patches/backport/2.6.18_FC6/iscsi_01_sync_kernel_code_with_release_2.0-869.2.patch
@@ -0,0 +1,4746 @@
+From 89ac09ec66db75fbda1bd77918066fb2ddebac38 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Mon, 25 Aug 2008 16:16:26 +0300
+Subject: [PATCH] iscsi_01_sync_kernel_code_with_release_2.0-869.2
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 529 +++++++-------
+ drivers/scsi/iscsi_tcp.h | 7 +-
+ drivers/scsi/libiscsi.c | 1457 +++++++++++++++--------------------
+ drivers/scsi/scsi_transport_iscsi.c | 500 +++---------
+ include/scsi/libiscsi.h | 108 ++--
+ include/scsi/scsi_transport_iscsi.h | 93 ++--
+ 6 files changed, 1120 insertions(+), 1574 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..8a17867 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -64,10 +64,6 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -498,63 +494,58 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- tcp_task->r2t = NULL;
++ tcp_ctask->r2t = NULL;
+ }
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (tcp_ctask->exp_datasn != datasn) {
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, datasn);
+ return ISCSI_ERR_DATASN;
+ }
+
+- tcp_task->exp_datasn++;
++ tcp_ctask->exp_datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
+ debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ __FUNCTION__, tcp_ctask->data_offset,
++ tcp_conn->in.datalen, scsi_bufflen(sc));
+ return ISCSI_ERR_DATA_OFFSET;
+ }
+
+@@ -567,8 +558,8 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
++ res_count <= scsi_bufflen(sc)))
++ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+@@ -582,7 +573,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,7 +583,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -603,8 +594,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -624,14 +615,14 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+@@ -644,23 +635,23 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_datasn != r2tsn){
++ debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
++ __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
+ return ISCSI_ERR_R2TSN;
+ }
+
+ /* fill-in new R2T associated with the task */
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
++ if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ ctask->itt);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+@@ -668,7 +659,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ if (r2t->data_length == 0) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -679,12 +670,12 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
++ if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ r2t->data_offset, scsi_bufflen(ctask->sc));
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ return ISCSI_ERR_DATALEN;
+ }
+@@ -692,13 +683,13 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->exp_datasn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
+ conn->r2t_pdus_cnt++;
+
+- iscsi_requeue_task(task);
++ iscsi_requeue_ctask(ctask);
+ return 0;
+ }
+
+@@ -741,8 +732,10 @@ static int
+ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+@@ -760,7 +753,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
++ rc = iscsi_verify_itt(conn, hdr, &itt);
+ if (rc)
+ return rc;
+
+@@ -769,21 +762,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
++ ctask = session->cmds[itt];
+ spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
++ rc = iscsi_data_rsp(conn, ctask);
++ spin_unlock(&conn->session->lock);
++ if (rc)
++ return rc;
+ if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+
+ /*
+ * Setup copy of Data-In into the Scsi_Cmnd
+@@ -798,21 +785,17 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+
+ debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+ "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
++ tcp_ctask->data_offset,
+ tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
+- return rc;
++ return iscsi_segment_seek_sg(&tcp_conn->in.segment,
++ scsi_sglist(ctask->sc),
++ scsi_sg_count(ctask->sc),
++ tcp_ctask->data_offset,
++ tcp_conn->in.datalen,
++ iscsi_tcp_process_data_in,
++ rx_hash);
+ }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+ if (tcp_conn->in.datalen) {
+ iscsi_tcp_data_recv_prep(tcp_conn);
+@@ -821,17 +804,15 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
+- else
++ else if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ spin_lock(&session->lock);
++ rc = iscsi_r2t_rsp(conn, ctask);
++ spin_unlock(&session->lock);
++ } else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -1193,7 +1174,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
++ debug_tcp("%s(%p%s)\n", __FUNCTION__, tcp_conn,
+ conn->hdrdgst_en? ", digest enabled" : "");
+
+ /* Clear the data segment - needs to be filled in by the
+@@ -1202,7 +1183,7 @@ iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
+
+ /* If header digest is enabled, compute the CRC and
+ * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
++ * sure that both iscsi_tcp_ctask and mtask have
+ * sufficient room.
+ */
+ if (conn->hdrdgst_en) {
+@@ -1234,7 +1215,7 @@ iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
++ debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __FUNCTION__,
+ tcp_conn, offset, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+@@ -1259,7 +1240,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ struct hash_desc *tx_hash = NULL;
+ unsigned int hdr_spec_len;
+
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
++ debug_tcp("%s(%p, datalen=%d%s)\n", __FUNCTION__, tcp_conn, len,
+ conn->datadgst_en? ", digest enabled" : "");
+
+ /* Make sure the datalen matches what the caller
+@@ -1277,7 +1258,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1288,7 +1269,7 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ * Called under connection lock.
+ **/
+ static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
+@@ -1305,8 +1286,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1324,76 +1305,87 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_ctask - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_ctask_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct scsi_cmnd *sc = ctask->sc;
+ int err;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
+- */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
+-
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
+-
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
+- }
+-
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++ tcp_ctask->sent = 0;
++ tcp_ctask->exp_datasn = 0;
+
+ /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ debug_scsi("ctask deq [cid %d itt 0x%x imm %d unsol %d]\n",
++ conn->id, ctask->itt, ctask->imm_count,
++ ctask->unsol_count);
++ iscsi_tcp_send_hdr_prep(conn, ctask->hdr, ctask->hdr_len);
+
+- if (!task->imm_count)
++ if (!ctask->imm_count)
+ return 0;
+
+ /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
++ err = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc), scsi_sg_count(sc),
++ 0, ctask->imm_count);
+ if (err)
+ return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ tcp_ctask->sent += ctask->imm_count;
++ ctask->imm_count = 0;
++ return 0;
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ **/
++static int
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ int rc;
++
++ /* Flush any pending data first. */
++ rc = iscsi_tcp_flush(conn);
++ if (rc < 0)
++ return rc;
++
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock_bh(&session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock_bh(&session->lock);
++ }
++
+ return 0;
+ }
+
+ /*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
++ * iscsi_tcp_ctask_xmit - xmit normal PDU task
++ * @conn: iscsi connection
++ * @ctask: iscsi command task
+ *
+ * We're expected to return 0 when everything was transmitted succesfully,
+ * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+ * of error.
+ */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+ int rc = 0;
+
+ flush:
+@@ -1402,39 +1394,32 @@ flush:
+ if (rc < 0)
+ return rc;
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
+- return 0;
+- }
+-
+ /* Are we done already? */
+ if (sc->sc_data_direction != DMA_TO_DEVICE)
+ return 0;
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (ctask->unsol_count != 0) {
++ struct iscsi_data *hdr = &tcp_ctask->unsol_dtask.hdr;
+
+ /* Prepare a header for the unsolicited PDU.
+ * The amount of data we want to send will be
+- * in task->data_count.
++ * in ctask->data_count.
+ * FIXME: return the data count instead.
+ */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ iscsi_prep_unsolicit_data_pdu(ctask, hdr);
+
+ debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++ ctask->itt, tcp_ctask->sent, ctask->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
++ tcp_ctask->sent,
++ ctask->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
++ tcp_ctask->sent += ctask->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ goto flush;
+ } else {
+ struct iscsi_session *session = conn->session;
+@@ -1443,22 +1428,22 @@ flush:
+ /* All unsolicited PDUs sent. Check for solicited PDUs.
+ */
+ spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ if (r2t != NULL) {
+ /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
++ if (!iscsi_solicit_data_cont(conn, ctask, r2t)) {
+ debug_scsi(" done with r2t %p\n", r2t);
+
+- __kfifo_put(tcp_task->r2tpool.queue,
++ __kfifo_put(tcp_ctask->r2tpool.queue,
+ (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
++ tcp_ctask->r2t = r2t = NULL;
+ }
+ }
+
+ if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ r2t = tcp_ctask->r2t;
+ }
+ spin_unlock_bh(&session->lock);
+
+@@ -1469,19 +1454,19 @@ flush:
+ }
+
+ debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
++ r2t, r2t->solicit_datasn - 1, ctask->itt,
+ r2t->data_offset + r2t->sent, r2t->data_count);
+
+ iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+ sizeof(struct iscsi_hdr));
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
++ rc = iscsi_tcp_send_data_prep(conn, scsi_sglist(sc),
++ scsi_sg_count(sc),
+ r2t->data_offset + r2t->sent,
+ r2t->data_count);
+ if (rc)
+ goto fail;
+- tcp_task->sent += r2t->data_count;
++ tcp_ctask->sent += r2t->data_count;
+ r2t->sent += r2t->data_count;
+ goto flush;
+ }
+@@ -1498,7 +1483,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,14 +1493,18 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ goto free_tcp_conn;
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+@@ -1527,12 +1516,14 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
++free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1553,6 +1544,7 @@ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+
+ spin_lock_bh(&session->lock);
+ tcp_conn->sock = NULL;
++ conn->recv_lock = NULL;
+ spin_unlock_bh(&session->lock);
+ sockfd_put(sock);
+ }
+@@ -1564,32 +1556,20 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+@@ -1640,8 +1620,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1665,8 +1643,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ if (err)
+ goto free_socket;
+
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
++ err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
++ &conn->local_port, kernel_getsockname);
+ if (err)
+ goto free_socket;
+
+@@ -1683,6 +1661,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+@@ -1696,6 +1681,21 @@ free_socket:
+ return err;
+ }
+
++/* called with host lock */
++static void
++iscsi_tcp_mtask_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
++{
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
++
++ /* Prepare PDU, optionally w/ immediate data */
++ iscsi_tcp_send_hdr_prep(conn, mtask->hdr, sizeof(*mtask->hdr));
++
++ /* If we have immediate data, attach a payload */
++ if (mtask->data_count)
++ iscsi_tcp_send_linear_data_prepare(conn, mtask->data,
++ mtask->data_count);
++}
++
+ static int
+ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ {
+@@ -1706,8 +1706,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1716,16 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4, NULL,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1734,11 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1749,11 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool);
+ }
+ }
+
+@@ -1818,6 +1818,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ return len;
+ }
+
++static int
++iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
++ char *buf)
++{
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
++ int len;
++
++ switch (param) {
++ case ISCSI_HOST_PARAM_IPADDRESS:
++ spin_lock_bh(&session->lock);
++ if (!session->leadconn)
++ len = -ENODEV;
++ else
++ len = sprintf(buf, "%s\n",
++ session->leadconn->local_address);
++ spin_unlock_bh(&session->lock);
++ break;
++ default:
++ return iscsi_host_get_param(shost, param, buf);
++ }
++ return len;
++}
++
+ static void
+ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ {
+@@ -1843,70 +1866,54 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ ctask->hdr = &tcp_ctask->hdr.cmd_hdr;
++ ctask->hdr_max = sizeof(tcp_ctask->hdr) - ISCSI_DIGEST_SIZE;
++ }
++
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ mtask->hdr = (struct iscsi_hdr *) &tcp_mtask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+@@ -1961,11 +1968,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+ ISCSI_HOST_INITIATOR_NAME |
+ ISCSI_HOST_NETDEV_NAME,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = 16,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1979,14 +1989,16 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+ /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
++ .get_host_param = iscsi_tcp_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_ctask_init,
++ .init_mgmt_task = iscsi_tcp_mtask_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2011,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
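The R2T handling restored above keeps two per-command-task FIFOs: a pool of free R2T descriptors (r2tpool) and a queue of R2Ts awaiting Data-Out transmission (r2tqueue). iscsi_r2t_rsp() moves a descriptor from the pool to the queue, and iscsi_tcp_ctask_xmit() drains the queue and returns each descriptor to the pool once serviced. Below is a rough userspace model of that flow, using plain arrays in place of the kernel kfifo; the sizes and field names are invented for the example.

/*
 * Illustrative sketch only -- a toy free pool plus pending queue,
 * mirroring the r2tpool/r2tqueue pairing per command task.
 */
#include <stdio.h>

#define EX_MAX_R2T 4	/* assumed, cf. session->max_r2t * 4 */

struct ex_r2t { unsigned int data_offset, data_length; };

struct ex_fifo {
	struct ex_r2t *slot[EX_MAX_R2T];
	int head, tail, count;
};

static int ex_put(struct ex_fifo *f, struct ex_r2t *r)
{
	if (f->count == EX_MAX_R2T)
		return 0;
	f->slot[f->tail] = r;
	f->tail = (f->tail + 1) % EX_MAX_R2T;
	f->count++;
	return 1;
}

static struct ex_r2t *ex_get(struct ex_fifo *f)
{
	struct ex_r2t *r;

	if (!f->count)
		return NULL;
	r = f->slot[f->head];
	f->head = (f->head + 1) % EX_MAX_R2T;
	f->count--;
	return r;
}

int main(void)
{
	static struct ex_r2t descs[EX_MAX_R2T];
	struct ex_fifo pool = { { 0 }, 0, 0, 0 };
	struct ex_fifo pending = { { 0 }, 0, 0, 0 };
	struct ex_r2t *r2t;
	int i;

	/* task init: every descriptor starts out free */
	for (i = 0; i < EX_MAX_R2T; i++)
		ex_put(&pool, &descs[i]);

	/* R2T PDU arrives: take a free descriptor, queue it for xmit */
	r2t = ex_get(&pool);
	r2t->data_offset = 0;
	r2t->data_length = 8192;
	ex_put(&pending, r2t);

	/* xmit path: service the R2T, then hand the descriptor back */
	r2t = ex_get(&pending);
	printf("sending %u bytes at offset %u\n",
	       r2t->data_length, r2t->data_offset);
	ex_put(&pool, r2t);
	return 0;
}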
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..ed0b991 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -103,6 +103,11 @@ struct iscsi_data_task {
+ char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
+ };
+
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[ISCSI_DIGEST_SIZE]; /* Header-Digest */
++};
++
+ struct iscsi_r2t_info {
+ __be32 ttt; /* copied from R2T */
+ __be32 exp_statsn; /* copied from R2T */
+@@ -114,7 +119,7 @@ struct iscsi_r2t_info {
+ struct iscsi_data_task dtask; /* Data-Out header buf */
+ };
+
+-struct iscsi_tcp_task {
++struct iscsi_tcp_cmd_task {
+ struct iscsi_hdr_buff {
+ struct iscsi_cmd cmd_hdr;
+ char hdrextbuf[ISCSI_MAX_AHS_SIZE +
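The new iscsi_tcp_mgmt_task, like iscsi_tcp_cmd_task, is the transport's per-task private data reached through the task's dd_data pointer; that is what lets the session_create hunk below pass sizeof(struct iscsi_tcp_cmd_task) and sizeof(struct iscsi_tcp_mgmt_task) to iscsi_session_setup(). A toy allocation sketch of the layout follows -- one block with the private area appended after the generic task. Names are invented; the 48-byte basic header segment and 4-byte CRC32C digest are standard iSCSI sizes, but nothing here is lifted from the patch.

/*
 * Illustrative sketch only -- the dd_data-follows-the-task allocation
 * pattern, in userspace.
 */
#include <stdio.h>
#include <stdlib.h>

struct ex_task {			/* stands in for iscsi_mgmt_task */
	unsigned int itt;
	void *dd_data;			/* transport private area */
};

struct ex_tcp_mgmt_task {		/* stands in for iscsi_tcp_mgmt_task */
	unsigned char hdr[48];		/* BHS */
	unsigned char hdrext[4];	/* room for a header digest */
};

static struct ex_task *ex_task_alloc(size_t priv_size)
{
	struct ex_task *task = calloc(1, sizeof(*task) + priv_size);

	if (task)
		task->dd_data = task + 1;	/* private data right after the task */
	return task;
}

int main(void)
{
	struct ex_task *task = ex_task_alloc(sizeof(struct ex_tcp_mgmt_task));
	struct ex_tcp_mgmt_task *tcp_mtask;

	if (!task)
		return 1;
	tcp_mtask = task->dd_data;
	printf("private area is %zu bytes\n", sizeof(*tcp_mtask));
	free(task);
	return 0;
}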
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..9975095 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -38,6 +38,14 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
++{
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
++}
++EXPORT_SYMBOL_GPL(class_to_transport_session);
++
+ /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+ #define SNA32_CHECK 2147483648UL
+
+@@ -79,170 +87,91 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ * xmit thread
+ */
+ if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
++ !list_empty(&session->leadconn->mgmtqueue))
++ scsi_queue_work(session->host,
++ &session->leadconn->xmitwork);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
++static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
+ {
+- unsigned exp_len = task->hdr_len + len;
++ unsigned exp_len = ctask->hdr_len + len;
+
+- if (exp_len > task->hdr_max) {
++ if (exp_len > ctask->hdr_max) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
++ ctask->hdr_len = exp_len;
+ return 0;
+ }
+
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++ unsigned hdrlength;
+ int rc;
+
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
++ ctask->hdr_len = 0;
++ rc = iscsi_add_hdr(ctask, sizeof(*hdr));
+ if (rc)
+ return rc;
+ hdr->opcode = ISCSI_OP_SCSI_CMD;
+ hdr->flags = ISCSI_ATTR_SIMPLE;
+ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
++ hdr->itt = build_itt(ctask->itt, session->age);
++ hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
+ hdr->cmdsn = cpu_to_be32(session->cmdsn);
+ session->cmdsn++;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ if (sc->cmd_len < MAX_COMMAND_SIZE)
++ memset(&hdr->cdb[sc->cmd_len], 0,
++ MAX_COMMAND_SIZE - sc->cmd_len);
+
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ ctask->imm_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,41 +187,40 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (scsi_bufflen(sc) >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(scsi_bufflen(sc),
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(hdr->dlength, ctask->imm_count);
+ } else
+ zero_data(hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min((session->first_burst),
++ (scsi_bufflen(sc))) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+ /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
++ hdrlength = ctask->hdr_len - sizeof(*hdr);
+
+ WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+ hdrlength /= ISCSI_PAD_LEN;
+@@ -300,180 +228,110 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ WARN_ON(hdrlength >= 256);
+ hdr->hlength = hdrlength & 0xFF;
+
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
++ if (conn->session->tt->init_cmd_task(conn->ctask))
++ return EIO;
+
+ conn->scsicmd_pdus_cnt++;
+ debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
++ "cmdsn %d win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ return 0;
+ }
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
+-
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
++ struct scsi_cmnd *sc = ctask->sc;
+
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
+-}
+-
+-void __iscsi_get_task(struct iscsi_task *task)
+-{
+- atomic_inc(&task->refcount);
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ atomic_inc(&ctask->refcount);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+ /*
+ * session lock must be held
+ */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ int err)
+ {
+ struct scsi_cmnd *sc;
+
+- sc = task->sc;
++ sc = ctask->sc;
+ if (!sc)
+ return;
+
+- if (task->state == ISCSI_TASK_PENDING)
++ if (ctask->state == ISCSI_TASK_PENDING)
+ /*
+ * cmd never made it to the xmit thread, so we should not count
+ * the cmd in the sequencing
+ */
+ conn->session->queued_cmdsn--;
+ else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
+
+ sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ if (conn->ctask == ctask)
++ conn->ctask = NULL;
+ /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++/**
++ * iscsi_free_mgmt_task - return mgmt task back to pool
++ * @conn: iscsi connection
++ * @mtask: mtask
++ *
++ * Must be called with session lock.
++ */
++void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
++ list_del_init(&mtask->running);
++ if (conn->login_mtask == mtask)
++ return;
+
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
++ if (conn->ping_mtask == mtask)
++ conn->ping_mtask = NULL;
++ __kfifo_put(conn->session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
+ }
++EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
+
+-static struct iscsi_task *
++static struct iscsi_mgmt_task *
+ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+ if (session->state == ISCSI_STATE_TERMINATE)
+ return NULL;
+@@ -483,56 +341,29 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ /*
+ * Login and Text are sent serially, in
+ * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
+ */
+- task = conn->login_task;
++ mtask = conn->login_mtask;
+ else {
+ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*)))
+ return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+ }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+
+ if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
+ } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
++ mtask->data_count = 0;
+
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ INIT_LIST_HEAD(&mtask->running);
++ list_add_tail(&mtask->running, &conn->mgmtqueue);
++ return mtask;
+ }
+
+ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+@@ -546,6 +377,7 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+ if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+ err = -EPERM;
+ spin_unlock_bh(&session->lock);
++ scsi_queue_work(session->host, &conn->xmitwork);
+ return err;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+@@ -554,7 +386,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+@@ -562,12 +394,12 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+ * then completes the command and task.
+ **/
+ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
++ struct iscsi_cmd_task *ctask, char *data,
+ int datalen)
+ {
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+@@ -591,7 +423,7 @@ invalid_datalen:
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,18 +433,6 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
+-
+ if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+ ISCSI_FLAG_CMD_OVERFLOW)) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+@@ -620,17 +440,19 @@ invalid_datalen:
+ if (res_count > 0 &&
+ (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+ res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+ scsi_set_resid(sc, res_count);
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
++ ISCSI_FLAG_CMD_BIDI_OVERFLOW))
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -655,9 +477,9 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ {
+ struct iscsi_nopout hdr;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- if (!rhdr && conn->ping_task)
++ if (!rhdr && conn->ping_mtask)
+ return;
+
+ memset(&hdr, 0, sizeof(struct iscsi_nopout));
+@@ -671,9 +493,18 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+ } else
+ hdr.ttt = RESERVED_ITT;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
++ if (!mtask) {
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
++ return;
++ }
++
++ /* only track our nops */
++ if (!rhdr) {
++ conn->ping_mtask = mtask;
++ conn->last_ping = jiffies;
++ }
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+@@ -702,31 +533,6 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -737,28 +543,108 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+ * queuecommand or send generic. session lock must be held and verify
+ * itt must have been called.
+ */
+-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, int datalen)
++static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, int datalen)
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+ conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
++ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
++ datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (conn->ping_mtask != mtask) {
++ /*
++ * If this is not in response to one of our
++ * nops then it must be from userspace.
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
++ datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ } else
++ mod_timer(&conn->transport_timer,
++ jiffies + conn->recv_timeout);
++ iscsi_free_mgmt_task(conn, mtask);
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
+ iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+
+ switch(opcode) {
+@@ -785,104 +671,11 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+
+ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
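The hunk above replaces the upstream task lookup helpers (iscsi_itt_to_task/iscsi_itt_to_ctask) with a classification of the incoming ITT by numeric range: SCSI command tasks, management tasks, or the reserved ITT for unsolicited PDUs. The standalone sketch below models only that range check; the constants are illustrative stand-ins for session->cmds_max, ISCSI_MGMT_ITT_OFFSET and session->mgmtpool_max, not the real header values.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real values come from the iscsi headers
     * and from the session that received the PDU. */
    #define EX_CMDS_MAX          128     /* session->cmds_max       */
    #define EX_MGMT_ITT_OFFSET   0xa00   /* ISCSI_MGMT_ITT_OFFSET   */
    #define EX_MGMT_POOL_MAX     16      /* session->mgmtpool_max   */

    enum ex_owner { EX_SCSI_TASK, EX_MGMT_TASK, EX_UNSOLICITED, EX_BAD_ITT };

    /* Mirrors the range checks at the top of __iscsi_complete_pdu(). */
    static enum ex_owner ex_classify_itt(uint32_t itt)
    {
            if (itt < EX_CMDS_MAX)
                    return EX_SCSI_TASK;    /* session->cmds[itt]            */
            if (itt >= EX_MGMT_ITT_OFFSET &&
                itt < EX_MGMT_ITT_OFFSET + EX_MGMT_POOL_MAX)
                    return EX_MGMT_TASK;    /* session->mgmt_cmds[itt - off] */
            if (itt == ~0U)
                    return EX_UNSOLICITED;  /* RESERVED_ITT: nop/async/reject */
            return EX_BAD_ITT;              /* -> ISCSI_ERR_BAD_ITT          */
    }

    int main(void)
    {
            printf("%d %d %d %d\n", ex_classify_itt(5),
                   ex_classify_itt(EX_MGMT_ITT_OFFSET + 2),
                   ex_classify_itt(~0U), ex_classify_itt(500));
            return 0;
    }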
+@@ -896,63 +689,51 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "received itt %x expected session "
++ "age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
++ "with itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ iscsi_conn_printk(KERN_ERR, conn,
++ "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
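The "(itt encoding: age+cid+itt)" comment describes the pre-2.6.26 scheme this backport restores: the wire ITT carries the task pool index in its low bits and the session age (and connection id) in the high bits, and iscsi_verify_itt() refuses a PDU whose age bits do not match the current session age. The sketch below is a minimal userspace model of that packing; the shift and mask values are assumptions for illustration, the real ones come from the iscsi headers (ISCSI_AGE_SHIFT/ISCSI_AGE_MASK).

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative layout only; the real ISCSI_AGE_SHIFT/ISCSI_AGE_MASK and
     * the index mask live in the iscsi headers and may differ. */
    #define EX_ITT_MASK   0x00000fffu
    #define EX_AGE_SHIFT  28
    #define EX_AGE_MASK   (0xfu << EX_AGE_SHIFT)

    static uint32_t ex_build_itt(uint32_t index, uint32_t age)
    {
            return index | (age << EX_AGE_SHIFT);
    }

    static uint32_t ex_get_itt(uint32_t itt)        /* wire ITT -> pool index */
    {
            return itt & EX_ITT_MASK;
    }

    static int ex_age_matches(uint32_t itt, uint32_t session_age)
    {
            /* the check iscsi_verify_itt() makes before trusting the index */
            return (itt & EX_AGE_MASK) == (session_age << EX_AGE_SHIFT);
    }

    int main(void)
    {
            uint32_t itt = ex_build_itt(7, 3);

            assert(ex_get_itt(itt) == 7);
            assert(ex_age_matches(itt, 3) && !ex_age_matches(itt, 4));
            return 0;
    }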
+@@ -974,6 +755,61 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
++static void iscsi_prep_mtask(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = mtask->hdr;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++
++ if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
++ hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, session->age);
++ /*
++ * TODO: We always use immediate, so we never hit this.
++ * If we start to send tmfs or nops as non-immediate then
++ * we should start checking the cmdsn numbers for mgmt tasks.
++ */
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
++ session->queued_cmdsn++;
++ session->cmdsn++;
++ }
++ }
++
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
++ mtask->data_count);
++}
++
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
++{
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc;
++
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
++ conn->session->state = ISCSI_STATE_LOGGING_OUT;
++ spin_unlock_bh(&conn->session->lock);
++
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ spin_lock_bh(&conn->session->lock);
++ if (rc)
++ return rc;
++
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
++ return 0;
++}
++
+ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ {
+ struct iscsi_session *session = conn->session;
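iscsi_xmit_mtask() above drops session->lock around the transport's xmit_mgmt_task() callback and clears conn->mtask only when the callback returns 0, so a partially sent management PDU is picked up again on the next xmitwork run. The fragment below is a hypothetical callback skeleton, shown only to illustrate that contract; example_send_pdu() is a made-up helper, not part of any real driver, and the structure layout assumed is the one used by this patch.

    /* Hypothetical software-transport callback, illustrating the calling
     * contract of iscsi_xmit_mtask(); example_send_pdu() is a made-up
     * helper. */
    static int example_xmit_mgmt_task(struct iscsi_conn *conn,
                                      struct iscsi_mgmt_task *mtask)
    {
            int rc;

            /* Runs with session->lock released by iscsi_xmit_mtask(). */
            rc = example_send_pdu(conn, mtask->hdr, mtask->data,
                                  mtask->data_count);
            if (rc)
                    return rc;  /* conn->mtask stays set; retried by xmitwork */
            return 0;           /* success: libiscsi clears conn->mtask */
    }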
+@@ -991,38 +827,37 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
+ return 0;
+ }
+
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
++static int iscsi_xmit_ctask(struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task = conn->task;
++ struct iscsi_cmd_task *ctask = conn->ctask;
+ int rc;
+
+- __iscsi_get_task(task);
++ __iscsi_get_ctask(ctask);
+ spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
++ rc = conn->session->tt->xmit_cmd_task(conn, ctask);
+ spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
+ if (!rc)
+- /* done with this task */
+- conn->task = NULL;
++ /* done with this ctask */
++ conn->ctask = NULL;
+ return rc;
+ }
+
+ /**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
++ * iscsi_requeue_ctask - requeue ctask to run from session workqueue
++ * @ctask: ctask to requeue
+ *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
++ * LLDs that need to run a ctask from the session workqueue should call
++ * this. The session lock must be held.
+ */
+-void iscsi_requeue_task(struct iscsi_task *task)
++void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+- list_move_tail(&task->running, &conn->requeue);
++ list_move_tail(&ctask->running, &conn->requeue);
+ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
++EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1044,8 +879,14 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ return -ENODATA;
+ }
+
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ rc = iscsi_xmit_ctask(conn);
++ if (rc)
++ goto again;
++ }
++
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1057,14 +898,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ */
+ check_mgmt:
+ while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
++ conn->mtask = list_entry(conn->mgmtqueue.next,
++ struct iscsi_mgmt_task, running);
++ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
++ iscsi_free_mgmt_task(conn, conn->mtask);
++ conn->mtask = NULL;
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ iscsi_prep_mtask(conn, conn->mtask);
++ list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
++ rc = iscsi_xmit_mtask(conn);
+ if (rc)
+ goto again;
+ }
+@@ -1074,21 +918,24 @@ check_mgmt:
+ if (conn->tmf_state == TMF_QUEUED)
+ break;
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
++ fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
+ continue;
+ }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
++ if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
++ fail_command(conn, conn->ctask, DID_ABORT << 16);
+ continue;
+ }
+- rc = iscsi_xmit_task(conn);
++
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ /*
+- * we could continuously get new task requests so
++ * we could continuously get new ctask requests so
+ * we need to check the mgmt queue for nops that need to
+ * be sent to avoid starvation
+ */
+@@ -1106,11 +953,11 @@ check_mgmt:
+ if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+ break;
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
++ conn->ctask = list_entry(conn->requeue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
++ rc = iscsi_xmit_ctask(conn);
+ if (rc)
+ goto again;
+ if (!list_empty(&conn->mgmtqueue))
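Together with the check_mgmt loop above, the requeue loop completes the fixed service order of iscsi_data_xmit() in this backport: finish any in-progress ctask or mtask, drain the mgmt queue, send new SCSI commands, then requeued (R2T) work, re-checking the mgmt queue in between so nops and TMFs are not starved behind a long command stream. The plain-C sketch below restates only that ordering; the queue names mirror struct iscsi_conn and everything else is stubbed.

    /* Schematic restatement of the service order in iscsi_data_xmit();
     * the queue names mirror struct iscsi_conn, the rest is a stub. */
    struct ex_conn {
            int mgmtqueue_len, xmitqueue_len, requeue_len;
    };

    static void ex_data_xmit_order(struct ex_conn *c)
    {
    check_mgmt:
            while (c->mgmtqueue_len)        /* 1. mgmt PDUs (nops, TMFs)   */
                    c->mgmtqueue_len--;

            while (c->xmitqueue_len) {      /* 2. new SCSI commands        */
                    c->xmitqueue_len--;
                    if (c->mgmtqueue_len)   /*    nop/TMF queued meanwhile? */
                            goto check_mgmt;/*    service it, avoid starvation */
            }

            while (c->requeue_len) {        /* 3. requeued (R2T) ctasks    */
                    c->requeue_len--;
                    if (c->mgmtqueue_len)
                            goto check_mgmt;
            }
    }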
+@@ -1154,12 +1001,11 @@ enum {
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+@@ -1168,11 +1014,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ host = sc->device->host;
+ spin_unlock(host->host_lock);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ session = iscsi_hostdata(host->hostdata);
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
++ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+@@ -1227,39 +1072,26 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ goto reject;
+ }
+
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
++ session->queued_cmdsn++;
++
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
++ sc->SCp.ptr = (char *)ctask;
+
+- session->queued_cmdsn++;
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
+ spin_unlock(&session->lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ spin_lock(host->host_lock);
+ return 0;
+
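The __kfifo_get() call above pulls a preallocated ctask pointer out of the session's cmdpool fifo; iscsi_pool_init() pre-loads the fifo with every element, and completion paths return the pointer with __kfifo_put(). The same idiom is used for the mgmtpool and the login mtask later in this patch. Below is a small userspace model of that pointer-pool pattern, written from scratch for illustration; it does not reproduce the kernel kfifo API.

    #include <stdlib.h>

    /* Userspace model of the iscsi_pool idiom: a fifo of pointers to
     * preallocated tasks.  The kernel code keeps the same pointers in a
     * struct kfifo and moves them with __kfifo_get()/__kfifo_put(). */
    struct ex_pool {
            void **slots;
            int head, tail, size;           /* simple ring of pointer slots */
    };

    static int ex_pool_init(struct ex_pool *p, int nr, size_t item_sz)
    {
            p->size = nr + 1;
            p->head = p->tail = 0;
            p->slots = calloc(p->size, sizeof(void *));
            if (!p->slots)
                    return -1;
            for (int i = 0; i < nr; i++) {  /* pre-load, like iscsi_pool_init() */
                    void *item = calloc(1, item_sz);

                    if (!item)
                            return -1;
                    p->slots[p->tail] = item;
                    p->tail = (p->tail + 1) % p->size;
            }
            return 0;
    }

    static void *ex_pool_get(struct ex_pool *p)     /* ~ __kfifo_get() of one ptr */
    {
            void *item;

            if (p->head == p->tail)
                    return NULL;            /* pool empty -> FAILURE_OOM path */
            item = p->slots[p->head];
            p->head = (p->head + 1) % p->size;
            return item;
    }

    static void ex_pool_put(struct ex_pool *p, void *item)  /* ~ __kfifo_put() */
    {
            p->slots[p->tail] = item;
            p->tail = (p->tail + 1) % p->size;
    }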
+@@ -1272,13 +1104,8 @@ reject:
+ fault:
+ spin_unlock(&session->lock);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
++ scsi_set_resid(sc, scsi_bufflen(sc));
++ sc->scsi_done(sc);
+ spin_lock(host->host_lock);
+ return 0;
+ }
+@@ -1295,7 +1122,7 @@ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+@@ -1309,13 +1136,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+@@ -1377,11 +1200,11 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ int timeout)
+ {
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
++ mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+ NULL, 0);
+- if (!task) {
++ if (!mtask) {
+ spin_unlock_bh(&session->lock);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ spin_lock_bh(&session->lock);
+@@ -1397,6 +1220,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);
++ scsi_queue_work(session->host, &conn->xmitwork);
+
+ /*
+ * block eh thread until:
+@@ -1415,7 +1239,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
++ /* if the session drops it will clean up the mtask */
+ if (age != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN)
+ return -ENOTCONN;
+@@ -1429,51 +1253,48 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_cmd_task *ctask, *tmp;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
++ conn->ctask = NULL;
+
+ /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, error << 16);
+ }
+ }
+
+ /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ if (lun == ctask->sc->device->lun || lun == -1) {
+ debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+ }
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
++static void iscsi_suspend_tx(struct iscsi_conn *conn)
+ {
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
++ scsi_flush_work(conn->session->host);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+
+ static void iscsi_start_tx(struct iscsi_conn *conn)
+ {
+ clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+
+ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+@@ -1484,7 +1305,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+
+ cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
++ session = class_to_transport_session(cls_session);
+
+ debug_scsi("scsi cmd %p timedout\n", scmd);
+
+@@ -1522,7 +1343,7 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ jiffies))
+ rc = EH_RESET_TIMER;
+ /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
++ if (conn->ping_mtask)
+ rc = EH_RESET_TIMER;
+ done:
+ spin_unlock(&session->lock);
+@@ -1546,7 +1367,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
+
+ recv_timeout *= HZ;
+ last_recv = conn->last_recv;
+- if (conn->ping_task &&
++ if (conn->ping_mtask &&
+ time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+ jiffies)) {
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+@@ -1572,30 +1393,27 @@ done:
+ spin_unlock(&session->lock);
+ }
+
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
++static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_tm *hdr)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_tm *hdr;
+ int rc, age;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ /*
+@@ -1624,17 +1442,17 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->eh_abort_cnt++;
+ age = session->age;
+
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
++ if (ctask->state == ISCSI_TASK_PENDING) {
++ fail_command(conn, ctask, DID_ABORT << 16);
+ goto success;
+ }
+
+@@ -1644,7 +1462,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ conn->tmf_state = TMF_QUEUED;
+
+ hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ iscsi_prep_abort_task_pdu(ctask, hdr);
+
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+ rc = FAILED;
+@@ -1654,20 +1472,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ switch (conn->tmf_state) {
+ case TMF_SUCCESS:
+ spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+ iscsi_suspend_tx(conn);
+ /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
++ * clean up task if aborted. grab the recv lock as a writer
+ */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
++ fail_command(conn, ctask, DID_ABORT << 16);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+ iscsi_start_tx(conn);
+ goto success_unlocked;
+ case TMF_TIMEDOUT:
+@@ -1677,7 +1491,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ case TMF_NOT_FOUND:
+ if (!sc->SCp.ptr) {
+ conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ /* ctask completed before tmf abort response */
+ debug_scsi("sc completed while abort in progress\n");
+ goto success;
+ }
+@@ -1690,7 +1504,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ success:
+ spin_unlock_bh(&session->lock);
+ success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+
+@@ -1698,7 +1512,7 @@ failed:
+ spin_unlock_bh(&session->lock);
+ failed_unlocked:
+ debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
++ ctask ? ctask->itt : 0);
+ mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+@@ -1716,15 +1530,12 @@ static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+
+ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
+ struct iscsi_conn *conn;
+ struct iscsi_tm *hdr;
+ int rc = FAILED;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+ debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+
+ mutex_lock(&session->eh_mutex);
+@@ -1767,11 +1578,13 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
+-
++ /* need to grab the recv lock then session lock */
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
+ conn->tmf_state = TMF_INITIAL;
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+ iscsi_start_tx(conn);
+ goto done;
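Both TMF completion paths in this backport, the abort case earlier and the LU reset case here, use the same cleanup ordering: quiesce the tx side, take conn->recv_lock as a writer so the receive path cannot complete commands concurrently, then take session->lock and fail the affected commands. The fragment below only restates that ordering as a sketch against the functions defined in this file; it is not additional driver code.

    /* Sketch of the cleanup ordering used after a successful TMF in this
     * backport (see iscsi_eh_abort()/iscsi_eh_device_reset() above). */
    static void example_tmf_cleanup(struct iscsi_conn *conn, struct scsi_cmnd *sc)
    {
            struct iscsi_session *session = conn->session;

            iscsi_suspend_tx(conn);                 /* stop xmitwork first      */

            write_lock_bh(conn->recv_lock);         /* block the rx completion path */
            spin_lock(&session->lock);
            fail_all_commands(conn, sc->device->lun, DID_ERROR);
            conn->tmf_state = TMF_INITIAL;
            spin_unlock(&session->lock);
            write_unlock_bh(conn->recv_lock);

            iscsi_start_tx(conn);                   /* resume normal transmission */
    }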
+@@ -1847,203 +1660,177 @@ void iscsi_pool_free(struct iscsi_pool *q)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
++ * @cmds_max: scsi host can queue
++ * @qdepth: scsi host cmds per lun
++ * @cmd_task_size: LLD ctask private data size
++ * @mgmt_task_size: LLD mtask private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
+- return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
++ if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
++ if (qdepth != 0)
++ printk(KERN_ERR "iscsi: invalid queue depth of %d. "
++ "Queue depth must be between 1 and %d.\n",
++ qdepth, ISCSI_MAX_CMD_PER_LUN);
++ qdepth = ISCSI_DEF_CMD_PER_LUN;
+ }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
++ if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
++ cmds_max < 2) {
++ if (cmds_max != 0)
++ printk(KERN_ERR "iscsi: invalid can_queue of %d. "
++ "can_queue must be a power of 2 and between "
++ "2 and %d - setting to %d.\n", cmds_max,
++ ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
++ cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
+ }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++
++ /* the iscsi layer takes one task for reserve */
++ shost->can_queue = cmds_max - 1;
++ shost->cmd_per_lun = qdepth;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
++ *hostno = shost->host_no;
++
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+ session->fast_abort = 1;
+ session->lu_reset_timeout = 15;
+ session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = cmds_max;
+ session->queued_cmdsn = session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+ mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
+
+- if (iscsi_add_session(cls_session, id))
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
++
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool);
++mgmtpool_alloc_fail:
+ iscsi_pool_free(&session->cmdpool);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
++ iscsi_remove_session(cls_session);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool);
+ iscsi_pool_free(&session->cmdpool);
+
+ kfree(session->password);
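The hostdata diagram above is the central structural difference from the 2.6.26 code being replaced: the backport keeps the iscsi_session, preceded by one word holding the class-session pointer, inside the Scsi_Host's hostdata area, sized with hostdata_privsize(). The standalone sketch below models that layout arithmetic with stand-in names; ex_hostdata_privsize()/ex_hostdata_session() mirror the macros added above, while ex_iscsi_hostdata() is an assumption about the matching helper in the backported libiscsi.h.

    #include <stdio.h>

    /* Userspace model of the hostdata layout drawn above:
     *   word 0 : pointer to the iscsi_cls_session
     *   word 1+: struct iscsi_session */
    struct ex_session { int state; };

    #define ex_hostdata_privsize(sz) (sizeof(unsigned long) + (sz) + \
                                      (sz) % sizeof(unsigned long))
    #define ex_hostdata_session(hd)  (*(void **)(hd))
    #define ex_iscsi_hostdata(hd)    ((void *)((unsigned long *)(hd) + 1))

    int main(void)
    {
            unsigned long hostdata[ex_hostdata_privsize(sizeof(struct ex_session)) /
                                   sizeof(unsigned long) + 1];
            int cls_session_stub = 42;      /* stands in for iscsi_cls_session */

            ex_hostdata_session(hostdata) = &cls_session_stub;
            ((struct ex_session *)ex_iscsi_hostdata(hostdata))->state = 1;

            printf("%p %d\n", ex_hostdata_session(hostdata),
                   ((struct ex_session *)ex_iscsi_hostdata(hostdata))->state);
            return 0;
    }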
+@@ -2051,10 +1838,12 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ kfree(session->username);
+ kfree(session->username_in);
+ kfree(session->targetname);
++ kfree(session->netdev);
++ kfree(session->hwaddress);
+ kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+- iscsi_destroy_session(cls_session);
++ iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,26 +1851,22 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+@@ -2100,30 +1885,30 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+ INIT_LIST_HEAD(&conn->requeue);
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+ init_timer(&conn->tmf_timer);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2183,7 +1968,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+ if (session->leadconn == conn)
+ session->leadconn = NULL;
+@@ -2255,7 +2040,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
++ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
+ return 0;
+ }
+@@ -2264,23 +2049,21 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ iscsi_free_mgmt_task(conn, mtask);
+ }
+
+- conn->task = NULL;
++ conn->mtask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2299,6 +2082,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ }
+
+ /*
++ * The LLD either freed/unset the lock on us, or userspace called
++ * stop but did not create a proper connection (connection was never
++ * bound or it was unbound then stop was called).
++ */
++ if (!conn->recv_lock) {
++ spin_unlock_bh(&session->lock);
++ mutex_unlock(&session->eh_mutex);
++ return;
++ }
++
++ /*
+ * When this is called for the in_login state, we only want to clean
+ * up the login task and connection. We do not need to block and set
+ * the recovery state again
+@@ -2314,6 +2108,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ spin_unlock_bh(&session->lock);
+
+ iscsi_suspend_tx(conn);
++
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +2125,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2361,7 +2160,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2500,14 +2299,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,7 +2310,8 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+@@ -2574,15 +2366,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_PASSWORD_IN:
+ len = sprintf(buf, "%s\n", session->password_in);
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2642,35 +2425,29 @@ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
++ if (!session->netdev)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->netdev);
++ len = sprintf(buf, "%s\n", session->netdev);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
++ if (!session->hwaddress)
+ len = sprintf(buf, "%s\n", "default");
+ else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
++ len = sprintf(buf, "%s\n", session->hwaddress);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
++ if (!session->initiatorname)
+ len = sprintf(buf, "%s\n", "unknown");
+ else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
++ len = sprintf(buf, "%s\n", session->initiatorname);
+ break;
++
+ default:
+ return -ENOSYS;
+ }
+@@ -2682,20 +2459,20 @@ EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+ char *buf, int buflen)
+ {
+- struct iscsi_host *ihost = shost_priv(shost);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
++ if (!session->netdev)
++ session->netdev = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
++ if (!session->hwaddress)
++ session->hwaddress = kstrdup(buf, GFP_KERNEL);
+ break;
+ case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
++ if (!session->initiatorname)
++ session->initiatorname = kstrdup(buf, GFP_KERNEL);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..ca7bb6f 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,24 +30,23 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
++#define ISCSI_SESSION_ATTRS 19
+ #define ISCSI_CONN_ATTRS 13
+ #define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_TRANSPORT_VERSION "2.0-869"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+@@ -64,12 +63,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +78,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
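The attribute conversion in this hunk is mechanical: on the pre-2.6.26 kernels this backport targets, transport sysfs attributes hang off struct class_device, so show routines take (struct class_device *, char *) and are declared with CLASS_DEVICE_ATTR rather than DEVICE_ATTR. For reference, the show_transport_attr(caps, "0x%x") invocation above expands to roughly the following; the expansion is written out by hand and is illustrative.

    /* Hand expansion (illustrative) of show_transport_attr(caps, "0x%x")
     * with the class_device-based signature used in this backport. */
    static ssize_t show_transport_caps(struct class_device *cdev, char *buf)
    {
            struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);

            return sprintf(buf, "0x%x\n", priv->iscsi_transport->caps);
    }
    static CLASS_DEVICE_ATTR(caps, S_IRUGO, show_transport_caps, NULL);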
+@@ -113,142 +116,21 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
++ atomic_set(&ihost->nr_scans, 0);
+
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
++ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
++ shost->host_no);
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
+@@ -257,10 +139,10 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ }
+
+ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ destroy_workqueue(ihost->scan_workq);
+ return 0;
+@@ -403,24 +285,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+ /**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+@@ -431,7 +295,7 @@ EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+ */
+ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ {
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+@@ -440,61 +304,22 @@ int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
++ return 0;
+ }
+
+ static void iscsi_scan_session(struct work_struct *work)
+@@ -502,14 +327,19 @@ static void iscsi_scan_session(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
++ struct iscsi_host *ihost = shost->shost_data;
++ unsigned long flags;
+
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
++ spin_lock_irqsave(&session->lock, flags);
++ if (session->state != ISCSI_SESSION_LOGGED_IN) {
++ spin_unlock_irqrestore(&session->lock, flags);
++ goto done;
++ }
++ spin_unlock_irqrestore(&session->lock, flags);
+
+- iscsi_user_scan_session(&session->dev, &scan_data);
++ scsi_scan_target(&session->dev, 0, session->target_id,
++ SCAN_WILD_CARD, 1);
++done:
+ atomic_dec(&ihost->nr_scans);
+ }
+
+@@ -549,7 +379,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unblock_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ /*
+@@ -617,19 +447,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ container_of(work, struct iscsi_cls_session,
+ unbind_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ /* Prevent new scans and make sure scanning is not in progress */
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
++ if (list_empty(&session->host_list)) {
+ mutex_unlock(&ihost->mutex);
+ return;
+ }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
++ list_del_init(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+ scsi_remove_target(&session->dev);
+@@ -639,18 +465,18 @@ static void __iscsi_unbind_session(struct work_struct *work)
+ static int iscsi_unbind_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ return queue_work(ihost->scan_workq, &session->unbind_work);
+ }
+
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+@@ -659,6 +485,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+ INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+ INIT_WORK(&session->block_work, __iscsi_block_session);
+@@ -671,57 +498,22 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
++ struct iscsi_host *ihost;
+ unsigned long flags;
+- unsigned int id = target_id;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+@@ -737,6 +529,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ list_add(&session->sess_list, &sesslist);
+ spin_unlock_irqrestore(&sesslock, flags);
+
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
++
+ iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+ return 0;
+
+@@ -750,18 +546,18 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+ * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+ */
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -797,7 +593,7 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+ int err;
+
+@@ -863,7 +659,6 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -876,17 +671,18 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * non-zero.
+ */
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+ unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -1219,20 +1015,21 @@ int iscsi_session_event(struct iscsi_cls_session *session,
+ EXPORT_SYMBOL_GPL(iscsi_session_event);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.cmds_max,
++ ev->u.c_session.queue_depth,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1307,7 +1104,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +1113,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1408,7 +1193,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,22 +1206,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+@@ -1568,8 +1337,11 @@ iscsi_if_rx(struct sk_buff *skb)
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1349,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1604,16 +1375,17 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+ iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+ iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
++
+ /*
+ * iSCSI session attrs
+ */
+ #define iscsi_session_attr_show(param, perm) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+ \
+ if (perm && !capable(CAP_SYS_ADMIN)) \
+@@ -1643,14 +1415,11 @@ iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+ iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+ iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+
+ static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_priv_session_state(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+ }
+ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+@@ -1658,11 +1427,9 @@ static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1677,10 +1444,9 @@ iscsi_priv_session_attr(recovery_tmo, "%d");
+ */
+ #define iscsi_host_attr_show(param) \
+ static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_host_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
++ struct Scsi_Host *shost = transport_class_to_shost(cdev); \
+ struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+ return priv->iscsi_transport->get_host_param(shost, param, buf); \
+ }
+@@ -1697,7 +1463,7 @@ iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1471,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,7 +1479,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1721,7 +1487,7 @@ do { \
+ #define SETUP_HOST_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->host_attrs[count] = &class_device_attr_host_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,24 +1577,22 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
+ transport_container_register(&priv->t.host_attrs);
+
+ SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+@@ -1886,8 +1650,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+ SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+ SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
+
+@@ -1901,9 +1663,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1691,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1712,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,8 +1724,8 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+@@ -1988,8 +1745,6 @@ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -2002,7 +1757,6 @@ static void __exit iscsi_transport_exit(void)
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..7b90b63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -52,7 +49,9 @@ struct device;
+ #endif
+
+ #define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_MGMT_CMDS_MAX 16 /* must be power of 2 */
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+@@ -70,10 +69,7 @@ enum {
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+@@ -86,6 +82,18 @@ enum {
+ ISCSI_DIGEST_SIZE = sizeof(__u32),
+ };
+
++struct iscsi_mgmt_task {
++ /*
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ unsigned data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
++};
+
+ enum {
+ ISCSI_TASK_COMPLETED,
+@@ -93,7 +101,7 @@ enum {
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+ * Because LLDs allocate their hdr differently, this is a pointer
+ * and length to that storage. It must be setup at session
+@@ -110,7 +118,6 @@ struct iscsi_task {
+ /* offset in unsolicited stream (bytes); */
+ unsigned unsol_offset;
+ unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
+ struct iscsi_conn *conn; /* used connection */
+
+@@ -121,9 +128,9 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
++static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
+ {
+- return (void*)task->hdr + task->hdr_len;
++ return (void*)ctask->hdr + ctask->hdr_len;
+ }
+
+ /* Connection's states */
+@@ -139,6 +146,11 @@ struct iscsi_conn {
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+@@ -147,7 +159,7 @@ struct iscsi_conn {
+ unsigned long last_ping;
+ int ping_timeout;
+ int recv_timeout;
+- struct iscsi_task *ping_task;
++ struct iscsi_mgmt_task *ping_mtask;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,8 +175,9 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+ struct list_head mgmtqueue; /* mgmt (control) xmit queue */
+@@ -195,6 +208,9 @@ struct iscsi_conn {
+ /* remote portal currently connected to */
+ int portal_port;
+ char portal_address[ISCSI_ADDRESS_BUF_LEN];
++ /* local address */
++ int local_port;
++ char local_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,7 +225,6 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+@@ -230,7 +245,6 @@ enum {
+ };
+
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+ /*
+ * Syncs up the scsi eh thread with the iscsi eh thread when sending
+ * task management functions. This must be taken before the session
+@@ -266,8 +280,10 @@ struct iscsi_session {
+ char *password;
+ char *password_in;
+ char *targetname;
+- char *ifacename;
+ char *initiatorname;
++ /* hw address or netdev iscsi connection is bound to */
++ char *hwaddress;
++ char *netdev;
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +297,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
+ struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_pool mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -307,44 +315,42 @@ extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
++
+ /*
+ * iSCSI host helpers.
+ */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+ extern int iscsi_host_set_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf,
+ int buflen);
+ extern int iscsi_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+
+ /*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ uint16_t, uint16_t, int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
++
+ #define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++ iscsi_cls_session_printk(prefix, \
++ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,29 +359,25 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+
+ #define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
++ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+ extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+- char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
++extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
++extern void iscsi_free_mgmt_task(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+ /*
+ * generic helpers
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..aab1eae 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -30,11 +30,11 @@
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +58,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -86,9 +83,17 @@ struct iscsi_transport {
+ /* LLD sets this to indicate what values it can export to sysfs */
+ uint64_t param_mask;
+ uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint16_t, uint16_t,
++ uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -113,15 +118,20 @@ struct iscsi_transport {
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ int (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
+ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+@@ -162,10 +172,9 @@ enum {
+ ISCSI_SESSION_FREE,
+ };
+
+-#define ISCSI_MAX_TARGET -1
+-
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct block_work;
+@@ -177,7 +186,7 @@ struct iscsi_cls_session {
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+ int state;
+ int sid; /* session id */
+@@ -194,20 +203,12 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
++struct iscsi_host {
++ struct list_head sessions;
+ atomic_t nr_scans;
+ struct mutex mutex;
+ struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
++ char scan_workq_name[KOBJ_NAME_LEN];
+ };
+
+ /*
+@@ -221,26 +222,22 @@ struct iscsi_endpoint {
+
+ extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+ extern int iscsi_session_event(struct iscsi_cls_session *session,
+ enum iscsi_uevent_e event);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
+
+ #endif
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18_FC6/iscsi_02_count_fmr_align_violations.patch b/kernel_patches/backport/2.6.18_FC6/iscsi_02_count_fmr_align_violations.patch
new file mode 100644
index 0000000..9bf2d19
--- /dev/null
+++ b/kernel_patches/backport/2.6.18_FC6/iscsi_02_count_fmr_align_violations.patch
@@ -0,0 +1,24 @@
+From 02753dd2caabfe6b1885cb80a8fb8532b416108d Mon Sep 17 00:00:00 2001
+From: Eli Dorfman <elid at voltaire.com>
+Date: Tue, 29 Apr 2008 10:12:39 +0300
+Subject: [PATCH] IB/iSER: Count fmr alignment violations per session
+
+Count fmr alignment violations per session
+as part of the iscsi statistics.
+
+Signed-off-by: Eli Dorfman <elid at voltaire.com>
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 7b90b63..cd3ca63 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -225,6 +225,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_pool {
+--
+1.5.5
+
diff --git a/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
new file mode 100644
index 0000000..798571f
--- /dev/null
+++ b/kernel_patches/backport/2.6.18_FC6/iscsi_03_compat_patch_for_RHEL5_and_SLES10.patch
@@ -0,0 +1,151 @@
+From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 29 Jun 2008 15:41:12 +0300
+Subject: [PATCH] compat patch for RHEL5 and SLES10
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index ca7bb6f..0ccd7e2 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -20,6 +20,8 @@
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
++#include <linux/version.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <net/tcp.h>
+@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
++ if (shost->hostt->scan_finished) {
++ if (queue_work(ihost->scan_workq, &session->scan_work))
++ atomic_inc(&ihost->nr_scans);
++ }
++#endif
+ }
+
+ /**
+@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ * Malformed skbs with wrong lengths or invalid creds are not processed.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
+ return 0;
+
+ release_nls:
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+@@ -1753,7 +1768,7 @@ unregister_transport_class:
+ static void __exit iscsi_transport_exit(void)
+ {
+ destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.18_FC6/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch b/kernel_patches/backport/2.6.18_FC6/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
deleted file mode 100644
index 798571f..0000000
--- a/kernel_patches/backport/2.6.18_FC6/iscsi_03_copmat_patch_for_RHEL5_and_SLES10.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From 66ab30f8dadef133bd04bbdcb434a7f742821bed Mon Sep 17 00:00:00 2001
-From: Doron Shoham <dorons at voltaire.com>
-Date: Sun, 29 Jun 2008 15:41:12 +0300
-Subject: [PATCH] copmat patch for RHEL5 and SLES10
-
-Signed-off-by: Doron Shoham <dorons at voltaire.com>
----
- drivers/scsi/scsi_transport_iscsi.c | 93 ++++++++++++++++++++---------------
- 1 files changed, 54 insertions(+), 39 deletions(-)
-
-diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
-index ca7bb6f..0ccd7e2 100644
---- a/drivers/scsi/scsi_transport_iscsi.c
-+++ b/drivers/scsi/scsi_transport_iscsi.c
-@@ -20,6 +20,8 @@
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-+#include <linux/version.h>
-+#include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <net/tcp.h>
-@@ -397,10 +399,12 @@ static void __iscsi_unblock_session(struct work_struct *work)
- * the async scanning code (drivers like iscsi_tcp do login and
- * scanning from userspace).
- */
-- if (shost->hostt->scan_finished) {
-- if (queue_work(ihost->scan_workq, &session->scan_work))
-- atomic_inc(&ihost->nr_scans);
-- }
-+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
-+ if (shost->hostt->scan_finished) {
-+ if (queue_work(ihost->scan_workq, &session->scan_work))
-+ atomic_inc(&ihost->nr_scans);
-+ }
-+#endif
- }
-
- /**
-@@ -1294,45 +1298,56 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
- * Malformed skbs with wrong lengths or invalid creds are not processed.
- */
- static void
--iscsi_if_rx(struct sk_buff *skb)
-+iscsi_if_rx(struct sock *sk, int len)
- {
-+ struct sk_buff *skb;
-+
- mutex_lock(&rx_queue_mutex);
-- while (skb->len >= NLMSG_SPACE(0)) {
-- int err;
-- uint32_t rlen;
-- struct nlmsghdr *nlh;
-- struct iscsi_uevent *ev;
--
-- nlh = nlmsg_hdr(skb);
-- if (nlh->nlmsg_len < sizeof(*nlh) ||
-- skb->len < nlh->nlmsg_len) {
-- break;
-+ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-+ if (NETLINK_CREDS(skb)->uid) {
-+ skb_pull(skb, skb->len);
-+ goto free_skb;
- }
-
-- ev = NLMSG_DATA(nlh);
-- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-- if (rlen > skb->len)
-- rlen = skb->len;
-+ while (skb->len >= NLMSG_SPACE(0)) {
-+ int err;
-+ uint32_t rlen;
-+ struct nlmsghdr *nlh;
-+ struct iscsi_uevent *ev;
-
-- err = iscsi_if_recv_msg(skb, nlh);
-- if (err) {
-- ev->type = ISCSI_KEVENT_IF_ERROR;
-- ev->iferror = err;
-- }
-- do {
-- /*
-- * special case for GET_STATS:
-- * on success - sending reply and stats from
-- * inside of if_recv_msg(),
-- * on error - fall through.
-- */
-- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ nlh = nlmsg_hdr(skb);
-+ if (nlh->nlmsg_len < sizeof(*nlh) ||
-+ skb->len < nlh->nlmsg_len) {
- break;
-- err = iscsi_if_send_reply(
-- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-- } while (err < 0 && err != -ECONNREFUSED);
-- skb_pull(skb, rlen);
-+ }
-+
-+ ev = NLMSG_DATA(nlh);
-+ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
-+ if (rlen > skb->len)
-+ rlen = skb->len;
-+
-+ err = iscsi_if_recv_msg(skb, nlh);
-+ if (err) {
-+ ev->type = ISCSI_KEVENT_IF_ERROR;
-+ ev->iferror = err;
-+ }
-+ do {
-+ /*
-+ * special case for GET_STATS:
-+ * on success - sending reply and stats from
-+ * inside of if_recv_msg(),
-+ * on error - fall through.
-+ */
-+ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
-+ break;
-+ err = iscsi_if_send_reply(
-+ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
-+ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-+ } while (err < 0 && err != -ECONNREFUSED);
-+ skb_pull(skb, rlen);
-+ }
-+free_skb:
-+ kfree_skb(skb);
- }
- mutex_unlock(&rx_queue_mutex);
- }
-@@ -1738,7 +1753,7 @@ static __init int iscsi_transport_init(void)
- return 0;
-
- release_nls:
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- unregister_session_class:
- transport_class_unregister(&iscsi_session_class);
- unregister_conn_class:
-@@ -1753,7 +1768,7 @@ unregister_transport_class:
- static void __exit iscsi_transport_exit(void)
- {
- destroy_workqueue(iscsi_eh_timer_workq);
-- netlink_kernel_release(nls);
-+ sock_release(nls->sk_socket);
- transport_class_unregister(&iscsi_connection_class);
- transport_class_unregister(&iscsi_session_class);
- transport_class_unregister(&iscsi_host_class);
---
-1.5.3.8
-
diff --git a/kernel_patches/backport/2.6.18_FC6/iser_01_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.18_FC6/iser_01_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..155795d
--- /dev/null
+++ b/kernel_patches/backport/2.6.18_FC6/iser_01_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From ad1e1df62ff096cc90257b0b42e843d0773ae981 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 11:37:50 +0300
+Subject: [PATCH] iser backports
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ 	err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
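A note on the connection-handle scheme these hunks restore: the pre-2.6.26
transport interface has no struct iscsi_endpoint, so the __u64 ep_handle
exchanged with userspace is simply the kernel pointer of the iser_conn, and
any handle coming back from userspace is re-validated against the global
ig.connlist before it is dereferenced. A minimal sketch of the round trip,
based on the lookup code above (illustrative only, not part of the patch;
locking and error paths trimmed):

    /* ep_connect: hand the new connection out as an opaque 64-bit handle */
    *ep_handle = (__u64)(unsigned long)ib_conn;

    /* ep_poll / bind_conn: never trust the handle directly, look it up */
    ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
    if (!ib_conn)
            return -EINVAL;     /* stale or unknown handle */

Because the handle is just a pointer value, iscsi_iser_ib_conn_lookup() walks
ig.connlist under connlist_mutex and returns the pointer only if the
connection is still registered there, which is what keeps passing raw
pointers through userspace safe in this scheme.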
diff --git a/kernel_patches/backport/2.6.18_FC6/iser_02_fix_iscsi_if.h b/kernel_patches/backport/2.6.18_FC6/iser_02_fix_iscsi_if.h
new file mode 100644
index 0000000..02c8a81
--- /dev/null
+++ b/kernel_patches/backport/2.6.18_FC6/iser_02_fix_iscsi_if.h
@@ -0,0 +1,145 @@
+From c703d2c0ca18a6a5b8f4ecbd5c02654a15fb11ff Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Tue, 26 Aug 2008 14:26:17 +0300
+Subject: [PATCH] fix iscsi_if.h
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/iscsi_if.h | 93 ++++++++++++++++++++++-------------------------
+ 1 files changed, 43 insertions(+), 50 deletions(-)
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..e19e584 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -50,7 +50,6 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+ ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+ ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+@@ -79,12 +78,6 @@ struct iscsi_uevent {
+ uint16_t cmds_max;
+ uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -257,49 +250,42 @@ enum iscsi_param {
+
+ ISCSI_PARAM_PING_TMO,
+ ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
++#define ISCSI_USERNAME (1 << ISCSI_PARAM_USERNAME)
++#define ISCSI_USERNAME_IN (1 << ISCSI_PARAM_USERNAME_IN)
++#define ISCSI_PASSWORD (1 << ISCSI_PARAM_PASSWORD)
++#define ISCSI_PASSWORD_IN (1 << ISCSI_PARAM_PASSWORD_IN)
++#define ISCSI_FAST_ABORT (1 << ISCSI_PARAM_FAST_ABORT)
++#define ISCSI_ABORT_TMO (1 << ISCSI_PARAM_ABORT_TMO)
++#define ISCSI_LU_RESET_TMO (1 << ISCSI_PARAM_LU_RESET_TMO)
++#define ISCSI_HOST_RESET_TMO (1 << ISCSI_PARAM_HOST_RESET_TMO)
++#define ISCSI_PING_TMO (1 << ISCSI_PARAM_PING_TMO)
++#define ISCSI_RECV_TMO (1 << ISCSI_PARAM_RECV_TMO)
+
+ /* iSCSI HBA params */
+ enum iscsi_host_param {
+@@ -310,13 +296,20 @@ enum iscsi_host_param {
+ ISCSI_HOST_PARAM_MAX,
+ };
+
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_HOST_HWADDRESS (1 << ISCSI_HOST_PARAM_HWADDRESS)
++#define ISCSI_HOST_INITIATOR_NAME (1 << ISCSI_HOST_PARAM_INITIATOR_NAME)
++#define ISCSI_HOST_NETDEV_NAME (1 << ISCSI_HOST_PARAM_NETDEV_NAME)
++#define ISCSI_HOST_IPADDRESS (1 << ISCSI_HOST_PARAM_IPADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+--
+1.5.3.8
+
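The parameter-mask hunk above drops the 1ULL suffix because, with
ISCSI_PARAM_IFACE_NAME, ISCSI_PARAM_ISID and ISCSI_PARAM_INITIATOR_NAME
removed, the reverted enum fits in 32 bits again, and the older
scsi_transport_iscsi presumably keeps param_mask in a 32-bit field. A small
illustrative helper showing how such a mask is typically consulted (assumed
example, not part of the patch):

    /* true when a transport advertises support for a given parameter;
     * a 32-bit unsigned shift is wide enough for the reverted 32-entry enum */
    static inline int iscsi_param_supported(const struct iscsi_transport *t,
                                            enum iscsi_param param)
    {
            return (t->param_mask & (1U << param)) != 0;
    }

The same reasoning applies to the host_param mask defines further down in the
hunk.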
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch b/kernel_patches/backport/2.6.9_U4/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
new file mode 100644
index 0000000..e35b289
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
@@ -0,0 +1,9402 @@
+From f75042cdafb7f42cd1f9a244872ae2f7896e3278 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Wed, 20 Aug 2008 14:32:54 +0300
+Subject: [PATCH 1/1] iscsi_01_sync_kernel_code_with_ofed_1_2_5
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 2537 +++++++++++++++++++----------------
+ drivers/scsi/iscsi_tcp.h | 136 ++-
+ drivers/scsi/libiscsi.c | 2501 ++++++++++++----------------------
+ drivers/scsi/scsi_transport_iscsi.c | 1208 +++++------------
+ include/scsi/iscsi_if.h | 119 +--
+ include/scsi/iscsi_proto.h | 23 +-
+ include/scsi/libiscsi.h | 247 ++---
+ include/scsi/scsi_transport_iscsi.h | 148 +--
+ 8 files changed, 2862 insertions(+), 4057 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..c9a3abf 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -29,15 +29,14 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/inet.h>
+-#include <linux/file.h>
+ #include <linux/blkdev.h>
+ #include <linux/crypto.h>
+ #include <linux/delay.h>
+ #include <linux/kfifo.h>
+ #include <linux/scatterlist.h>
++#include <linux/mutex.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+-#include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_transport_iscsi.h>
+@@ -48,7 +47,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus at yahoo.com>, "
+ "Alex Aizman <itn780 at yahoo.com>");
+ MODULE_DESCRIPTION("iSCSI/TCP data-path");
+ MODULE_LICENSE("GPL");
+-#undef DEBUG_TCP
++/* #define DEBUG_TCP */
+ #define DEBUG_ASSERT
+
+ #ifdef DEBUG_TCP
+@@ -64,515 +63,200 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment);
+-
+-/*
+- * Scatterlist handling: inside the iscsi_segment, we
+- * remember an index into the scatterlist, and set data/size
+- * to the current scatterlist entry. For highmem pages, we
+- * kmap as needed.
+- *
+- * Note that the page is unmapped when we return from
+- * TCP's data_ready handler, so we may end up mapping and
+- * unmapping the same page repeatedly. The whole reason
+- * for this is that we shouldn't keep the page mapped
+- * outside the softirq.
+- */
+-
+-/**
+- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+- * @segment: the buffer object
+- * @sg: scatterlist
+- * @offset: byte offset into that sg entry
+- *
+- * This function sets up the segment so that subsequent
+- * data is copied to the indicated sg entry, at the given
+- * offset.
+- */
+ static inline void
+-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg, unsigned int offset)
++iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+ {
+- segment->sg = sg;
+- segment->sg_offset = offset;
+- segment->size = min(sg->length - offset,
+- segment->total_size - segment->total_copied);
+- segment->data = NULL;
++ ibuf->sg.page = virt_to_page(vbuf);
++ ibuf->sg.offset = offset_in_page(vbuf);
++ ibuf->sg.length = size;
++ ibuf->sent = 0;
++ ibuf->use_sendmsg = 1;
+ }
+
+-/**
+- * iscsi_tcp_segment_map - map the current S/G page
+- * @segment: iscsi_segment
+- * @recv: 1 if called from recv path
+- *
+- * We only need to possibly kmap data if scatter lists are being used,
+- * because the iscsi passthrough and internal IO paths will never use high
+- * mem pages.
+- */
+ static inline void
+-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
++iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+ {
+- struct scatterlist *sg;
+-
+- if (segment->data != NULL || !segment->sg)
+- return;
+-
+- sg = segment->sg;
+- BUG_ON(segment->sg_mapped);
+- BUG_ON(sg->length == 0);
+-
++ ibuf->sg.page = sg->page;
++ ibuf->sg.offset = sg->offset;
++ ibuf->sg.length = sg->length;
+ /*
+- * If the page count is greater than one it is ok to send
+- * to the network layer's zero copy send path. If not we
+- * have to go the slow sendmsg path. We always map for the
+- * recv path.
++ * Fastpath: sg element fits into single page
+ */
+- if (page_count(sg_page(sg)) >= 1 && !recv)
+- return;
+-
+- debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+- segment);
+- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+- segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
++ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
++ ibuf->use_sendmsg = 0;
++ else
++ ibuf->use_sendmsg = 1;
++ ibuf->sent = 0;
+ }
+
+-static inline void
+-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
++static inline int
++iscsi_buf_left(struct iscsi_buf *ibuf)
+ {
+- debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
++ int rc;
+
+- if (segment->sg_mapped) {
+- debug_tcp("iscsi_tcp_segment_unmap valid\n");
+- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+- segment->sg_mapped = NULL;
+- segment->data = NULL;
+- }
++ rc = ibuf->sg.length - ibuf->sent;
++ BUG_ON(rc < 0);
++ return rc;
+ }
+
+-/*
+- * Splice the digest buffer into the buffer
+- */
+ static inline void
+-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
++iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ u8* crc)
+ {
+- segment->data = digest;
+- segment->digest_len = ISCSI_DIGEST_SIZE;
+- segment->total_size += ISCSI_DIGEST_SIZE;
+- segment->size = ISCSI_DIGEST_SIZE;
+- segment->copied = 0;
+- segment->sg = NULL;
+- segment->hash = NULL;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++
++ crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
++ buf->sg.length = tcp_conn->hdr_size;
+ }
+
+-/**
+- * iscsi_tcp_segment_done - check whether the segment is complete
+- * @segment: iscsi segment to check
+- * @recv: set to one of this is called from the recv path
+- * @copied: number of bytes copied
+- *
+- * Check if we're done receiving this segment. If the receive
+- * buffer is full but we expect more data, move on to the
+- * next entry in the scatterlist.
+- *
+- * If the amount of data we received isn't a multiple of 4,
+- * we will transparently receive the pad bytes, too.
+- *
+- * This function must be re-entrant.
+- */
+ static inline int
+-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
++iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
+ {
+- static unsigned char padbuf[ISCSI_PAD_LEN];
+- struct scatterlist sg;
+- unsigned int pad;
++ struct sk_buff *skb = tcp_conn->in.skb;
++
++ tcp_conn->in.zero_copy_hdr = 0;
+
+- debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+- segment->size, recv ? "recv" : "xmit");
+- if (segment->hash && copied) {
++ if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
++ tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
+ /*
+- * If a segment is kmapd we must unmap it before sending
+- * to the crypto layer since that will try to kmap it again.
++ * Zero-copy PDU Header: using connection context
++ * to store header pointer.
+ */
+- iscsi_tcp_segment_unmap(segment);
+-
+- if (!segment->data) {
+- sg_init_table(&sg, 1);
+- sg_set_page(&sg, sg_page(segment->sg), copied,
+- segment->copied + segment->sg_offset +
+- segment->sg->offset);
+- } else
+- sg_init_one(&sg, segment->data + segment->copied,
+- copied);
+- crypto_hash_update(segment->hash, &sg, copied);
+- }
+-
+- segment->copied += copied;
+- if (segment->copied < segment->size) {
+- iscsi_tcp_segment_map(segment, recv);
+- return 0;
+- }
+-
+- segment->total_copied += segment->copied;
+- segment->copied = 0;
+- segment->size = 0;
+-
+- /* Unmap the current scatterlist page, if there is one. */
+- iscsi_tcp_segment_unmap(segment);
+-
+- /* Do we have more scatterlist entries? */
+- debug_tcp("total copied %u total size %u\n", segment->total_copied,
+- segment->total_size);
+- if (segment->total_copied < segment->total_size) {
+- /* Proceed to the next entry in the scatterlist. */
+- iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+- 0);
+- iscsi_tcp_segment_map(segment, recv);
+- BUG_ON(segment->size == 0);
+- return 0;
+- }
+-
+- /* Do we need to handle padding? */
+- pad = iscsi_padding(segment->total_copied);
+- if (pad != 0) {
+- debug_tcp("consume %d pad bytes\n", pad);
+- segment->total_size += pad;
+- segment->size = pad;
+- segment->data = padbuf;
+- return 0;
+- }
+-
+- /*
+- * Set us up for transferring the data digest. hdr digest
+- * is completely handled in hdr done function.
+- */
+- if (segment->hash) {
+- crypto_hash_final(segment->hash, segment->digest);
+- iscsi_tcp_segment_splice_digest(segment,
+- recv ? segment->recv_digest : segment->digest);
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-/**
+- * iscsi_tcp_xmit_segment - transmit segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to transmnit
+- *
+- * This function transmits as much of the buffer as
+- * the network layer will accept, and returns the number of
+- * bytes transmitted.
+- *
+- * If CRC hashing is enabled, the function will compute the
+- * hash as it goes. When the entire segment has been transmitted,
+- * it will retrieve the hash value and send it as well.
+- */
+-static int
+-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct socket *sk = tcp_conn->sock;
+- unsigned int copied = 0;
+- int r = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 0, r)) {
+- struct scatterlist *sg;
+- unsigned int offset, copy;
+- int flags = 0;
+-
+- r = 0;
+- offset = segment->copied;
+- copy = segment->size - offset;
+-
+- if (segment->total_copied + segment->size < segment->total_size)
+- flags |= MSG_MORE;
+-
+- /* Use sendpage if we can; else fall back to sendmsg */
+- if (!segment->data) {
+- sg = segment->sg;
+- offset += segment->sg_offset + sg->offset;
+- r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+- flags);
++ if (skb_shinfo(skb)->frag_list == NULL &&
++ !skb_shinfo(skb)->nr_frags) {
++ tcp_conn->in.hdr = (struct iscsi_hdr *)
++ ((char*)skb->data + tcp_conn->in.offset);
++ tcp_conn->in.zero_copy_hdr = 1;
+ } else {
+- struct msghdr msg = { .msg_flags = flags };
+- struct kvec iov = {
+- .iov_base = segment->data + offset,
+- .iov_len = copy
+- };
+-
+- r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
++ /* ignoring return code since we checked
++ * in.copy before */
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ &tcp_conn->hdr, tcp_conn->hdr_size);
++ tcp_conn->in.hdr = &tcp_conn->hdr;
+ }
++ tcp_conn->in.offset += tcp_conn->hdr_size;
++ tcp_conn->in.copy -= tcp_conn->hdr_size;
++ } else {
++ int hdr_remains;
++ int copylen;
+
+- if (r < 0) {
+- iscsi_tcp_segment_unmap(segment);
+- if (copied || r == -EAGAIN)
+- break;
+- return r;
+- }
+- copied += r;
+- }
+- return copied;
+-}
+-
+-/**
+- * iscsi_tcp_segment_recv - copy data to segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to copy to
+- * @ptr: data pointer
+- * @len: amount of data available
+- *
+- * This function copies up to @len bytes to the
+- * given buffer, and returns the number of bytes
+- * consumed, which can actually be less than @len.
+- *
+- * If hash digest is enabled, the function will update the
+- * hash while copying.
+- * Combining these two operations doesn't buy us a lot (yet),
+- * but in the future we could implement combined copy+crc,
+- * just the way we do for network layer checksums.
+- */
+-static int
+-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment, const void *ptr,
+- unsigned int len)
+-{
+- unsigned int copy = 0, copied = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+- if (copied == len) {
+- debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+- len);
+- break;
+- }
+-
+- copy = min(len - copied, segment->size - segment->copied);
+- debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+- memcpy(segment->data + segment->copied, ptr + copied, copy);
+- copied += copy;
+- }
+- return copied;
+-}
+-
+-static inline void
+-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+- unsigned char digest[ISCSI_DIGEST_SIZE])
+-{
+- struct scatterlist sg;
+-
+- sg_init_one(&sg, hdr, hdrlen);
+- crypto_hash_digest(hash, &sg, hdrlen, digest);
+-}
+-
+-static inline int
+-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- if (!segment->digest_len)
+- return 1;
+-
+- if (memcmp(segment->recv_digest, segment->digest,
+- segment->digest_len)) {
+- debug_scsi("digest mismatch\n");
+- return 0;
+- }
++ /*
++ * PDU header scattered across SKB's,
++ * copying it... This'll happen quite rarely.
++ */
+
+- return 1;
+-}
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
++ tcp_conn->in.hdr_offset = 0;
+
+-/*
+- * Helper function to set up segment buffer
+- */
+-static inline void
+-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- memset(segment, 0, sizeof(*segment));
+- segment->total_size = size;
+- segment->done = done;
++ hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
++ BUG_ON(hdr_remains <= 0);
+
+- if (hash) {
+- segment->hash = hash;
+- crypto_hash_init(hash);
+- }
+-}
++ copylen = min(tcp_conn->in.copy, hdr_remains);
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
++ copylen);
+
+-static inline void
+-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+- size_t size, iscsi_segment_done_fn_t *done,
+- struct hash_desc *hash)
+-{
+- __iscsi_segment_init(segment, size, done, hash);
+- segment->data = data;
+- segment->size = size;
+-}
++ debug_tcp("PDU gather offset %d bytes %d in.offset %d "
++ "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
++ tcp_conn->in.offset, tcp_conn->in.copy);
+
+-static inline int
+-iscsi_segment_seek_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg_list, unsigned int sg_count,
+- unsigned int offset, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- struct scatterlist *sg;
+- unsigned int i;
+-
+- debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+- offset, size);
+- __iscsi_segment_init(segment, size, done, hash);
+- for_each_sg(sg_list, sg, sg_count, i) {
+- debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+- sg->offset);
+- if (offset < sg->length) {
+- iscsi_tcp_segment_init_sg(segment, sg, offset);
+- return 0;
++ tcp_conn->in.offset += copylen;
++ tcp_conn->in.copy -= copylen;
++ if (copylen < hdr_remains) {
++ tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
++ tcp_conn->in.hdr_offset += copylen;
++ return -EAGAIN;
+ }
+- offset -= sg->length;
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->discontiguous_hdr_cnt++;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+
+- return ISCSI_ERR_DATA_OFFSET;
+-}
+-
+-/**
+- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+- * @tcp_conn: iscsi connection to prep for
+- *
+- * This function always passes NULL for the hash argument, because when this
+- * function is called we do not yet know the final size of the header and want
+- * to delay the digest processing until we know that.
+- */
+-static void
+-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+- tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+- iscsi_tcp_hdr_recv_done, NULL);
+-}
+-
+-/*
+- * Handle incoming reply to any other type of command
+- */
+-static int
+-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- int rc = 0;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+- conn->data, tcp_conn->in.datalen);
+- if (rc)
+- return rc;
+-
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
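
The slow path above (IN_PROGRESS_HEADER_GATHER) accumulates a header that
straddles several skbs into tcp_conn->hdr until hdr_size bytes have arrived.
Below is a minimal userspace sketch of that accumulation contract; the names
and the fixed 48-byte basic header segment are invented for illustration and
this is not the driver code itself.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define HDR_SIZE 48          /* iSCSI basic header segment, for the sketch */

struct gather {
    unsigned char hdr[HDR_SIZE];
    int hdr_offset;          /* bytes of the header collected so far */
};

/* Feed 'len' bytes; returns -EAGAIN while the header is incomplete,
 * and the number of bytes consumed once it is complete. */
static int feed(struct gather *g, const unsigned char *buf, int len)
{
    int remains = HDR_SIZE - g->hdr_offset;
    int copylen = len < remains ? len : remains;

    memcpy(g->hdr + g->hdr_offset, buf, copylen);
    g->hdr_offset += copylen;
    if (g->hdr_offset < HDR_SIZE)
        return -EAGAIN;      /* wait for the next chunk */
    return copylen;
}

int main(void)
{
    struct gather g = { .hdr_offset = 0 };
    unsigned char chunk[30] = { 0 };

    printf("first feed:  %d\n", feed(&g, chunk, 30));  /* -EAGAIN */
    printf("second feed: %d\n", feed(&g, chunk, 30));  /* 18 consumed */
    return 0;
}

The driver does the same bookkeeping with tcp_conn->in.hdr_offset and returns
-EAGAIN so the receive routine simply bails out and resumes on the next skb.
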
+
+-static void
+-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct hash_desc *rx_hash = NULL;
+-
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- conn->data, tcp_conn->in.datalen,
+- iscsi_tcp_data_recv_done, rx_hash);
+-}
+-
+ /*
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
++ struct scsi_cmnd *sc;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
+- tcp_task->r2t = NULL;
+- }
++ sc = ctask->sc;
++ if (unlikely(!sc))
++ return;
++
++ tcp_ctask->xmstate = XMSTATE_IDLE;
++ tcp_ctask->r2t = NULL;
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
++ int rc;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
++ /*
++ * setup Data-In byte counter (gets decremented..)
++ */
++ ctask->data_count = tcp_conn->in.datalen;
++
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (ctask->datasn != datasn)
+ return ISCSI_ERR_DATASN;
+- }
+
+- tcp_task->exp_datasn++;
++ ctask->datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+- debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ return ISCSI_ERR_DATA_OFFSET;
+- }
+
+ if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ struct scsi_cmnd *sc = ctask->sc;
++
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+- ISCSI_FLAG_DATA_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
+- else
++ res_count <= sc->request_bufflen) {
++ sc->resid = res_count;
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ }
+
+ conn->datain_pdus_cnt++;
+@@ -582,7 +266,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,10 +276,11 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -603,8 +288,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -619,57 +304,94 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ conn->dataout_pdus_cnt++;
+
+ r2t->sent = 0;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (sc->use_sg) {
++ int i, sg_count = 0;
++ struct scatterlist *sg = sc->request_buffer;
++
++ r2t->sg = NULL;
++ for (i = 0; i < sc->use_sg; i++, sg += 1) {
++ /* FIXME: prefetch ? */
++ if (sg_count + sg->length > r2t->data_offset) {
++ int page_offset;
++
++ /* sg page found! */
++
++ /* offset within this page */
++ page_offset = r2t->data_offset - sg_count;
++
++ /* fill in this buffer */
++ iscsi_buf_init_sg(&r2t->sendbuf, sg);
++ r2t->sendbuf.sg.offset += page_offset;
++ r2t->sendbuf.sg.length -= page_offset;
++
++ /* xmit logic will continue with next one */
++ r2t->sg = sg + 1;
++ break;
++ }
++ sg_count += sg->length;
++ }
++ BUG_ON(r2t->sg == NULL);
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + r2t->data_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
+ }
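
The scatterlist walk above locates the entry that contains r2t->data_offset
and points the send buffer partway into it. The same seek, restated as a
standalone sketch (the struct and names here are made up for the example):

#include <stdio.h>

struct sg { unsigned length; };

/* Returns the index of the entry containing 'offset' and stores the
 * offset within that entry; returns -1 if 'offset' is out of range. */
static int sg_seek(const struct sg *sg, int nents, unsigned offset,
                   unsigned *page_offset)
{
    unsigned sg_count = 0;

    for (int i = 0; i < nents; i++) {
        if (sg_count + sg[i].length > offset) {
            *page_offset = offset - sg_count;
            return i;
        }
        sg_count += sg[i].length;
    }
    return -1;
}

int main(void)
{
    struct sg sg[3] = { { 4096 }, { 4096 }, { 2048 } };
    unsigned page_offset;
    int i = sg_seek(sg, 3, 5000, &page_offset);

    printf("entry %d, offset %u within it\n", i, page_offset); /* 1, 904 */
    return 0;
}
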
+
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2t with datalen %d\n",
+- tcp_conn->in.datalen);
++ printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
++ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ return ISCSI_ERR_R2TSN;
+- }
+
+- /* fill-in new R2T associated with the task */
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+- iscsi_conn_printk(KERN_INFO, conn,
+- "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ /* FIXME: use R2TSN to detect missing R2T */
++
++ /* fill-in new R2T associated with the task */
++ spin_lock(&session->lock);
++ if (!ctask->sc || ctask->mtask ||
++ session->state != ISCSI_STATE_LOGGED_IN) {
++ printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
++ "recovery...\n", ctask->itt);
++ spin_unlock(&session->lock);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = be32_to_cpu(rhdr->data_length);
+ if (r2t->data_length == 0) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
++ spin_unlock(&session->lock);
+ return ISCSI_ERR_DATALEN;
+ }
+
+@@ -679,159 +401,122 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with data len %u at offset %u "
+- "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ if (r2t->data_offset + r2t->data_length > ctask->total_length) {
++ spin_unlock(&session->lock);
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
++ "offset %u and total length %d\n", r2t->data_length,
++ r2t->data_offset, ctask->total_length);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+- conn->r2t_pdus_cnt++;
++ tcp_ctask->exp_r2tsn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ list_move_tail(&ctask->running, &conn->xmitqueue);
+
+- iscsi_requeue_task(task);
+- return 0;
+-}
+-
+-/*
+- * Handle incoming reply to DataIn command
+- */
+-static int
+-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+- int rc;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- /* check for non-exceptional status */
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+- if (rc)
+- return rc;
+- }
++ scsi_queue_work(session->host, &conn->xmitwork);
++ conn->r2t_pdus_cnt++;
++ spin_unlock(&session->lock);
+
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
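
Before queueing any Data-Out, the incoming R2T is sanity-checked: a zero data
length is rejected, and offset plus length must stay within the command's
total transfer length. A compact restatement of just those two checks, with
error codes invented for the example:

#include <stdio.h>

#define ERR_DATALEN  1
#define ERR_NONE     0

static int check_r2t(unsigned data_offset, unsigned data_length,
                     unsigned total_length)
{
    if (data_length == 0)
        return ERR_DATALEN;                 /* invalid R2T, zero data len */
    if (data_offset + data_length > total_length)
        return ERR_DATALEN;                 /* R2T asks past the buffer   */
    return ERR_NONE;
}

int main(void)
{
    printf("%d\n", check_r2t(0, 8192, 65536));      /* 0: accepted */
    printf("%d\n", check_r2t(61440, 8192, 65536));  /* 1: overruns */
    return 0;
}
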
+
+-/**
+- * iscsi_tcp_hdr_dissect - process PDU header
+- * @conn: iSCSI connection
+- * @hdr: PDU header
+- *
+- * This function analyzes the header of the PDU received,
+- * and performs several sanity checks. If the PDU is accompanied
+- * by data, the receive buffer is set up to copy the incoming data
+- * to the correct location.
+- */
+ static int
+-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
++iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_hdr *hdr;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ uint32_t cdgst, rdgst = 0, itt;
++
++ hdr = tcp_conn->in.hdr;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: datalen %d > %d\n",
+- tcp_conn->in.datalen, conn->max_recv_dlength);
++ printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
++ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
++ tcp_conn->data_copied = 0;
+
+- /* Additional header segments. So far, we don't
+- * process additional headers.
+- */
++ /* read AHS */
+ ahslen = hdr->hlength << 2;
++ tcp_conn->in.offset += ahslen;
++ tcp_conn->in.copy -= ahslen;
++ if (tcp_conn->in.copy < 0) {
++ printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
++ "%d bytes\n", ahslen);
++ return ISCSI_ERR_AHSLEN;
++ }
++
++ /* calculate read padding */
++ tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
++ if (tcp_conn->in.padding) {
++ tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
++ debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
++ }
++
++ if (conn->hdrdgst_en) {
++ struct scatterlist sg;
++
++ sg_init_one(&sg, (u8 *)hdr,
++ sizeof(struct iscsi_hdr) + ahslen);
++ crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
++ (u8 *)&cdgst);
++ rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
++ ahslen);
++ if (cdgst != rdgst) {
++ printk(KERN_ERR "iscsi_tcp: hdrdgst error "
++ "recv 0x%x calc 0x%x\n", rdgst, cdgst);
++ return ISCSI_ERR_HDR_DGST;
++ }
++ }
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
++ rc = iscsi_verify_itt(conn, hdr, &itt);
++ if (rc == ISCSI_ERR_NO_SCSI_CMD) {
++ tcp_conn->in.datalen = 0; /* force drop */
++ return 0;
++ } else if (rc)
+ return rc;
+
+- debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+- opcode, ahslen, tcp_conn->in.datalen);
++ debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
++ opcode, tcp_conn->in.offset, tcp_conn->in.copy,
++ ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
+- if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+-
+- /*
+- * Setup copy of Data-In into the Scsi_Cmnd
+- * Scatterlist case:
+- * We set up the iscsi_segment to point to the next
+- * scatterlist entry to copy to. As we go along,
+- * we move on to the next scatterlist entry and
+- * update the digest per-entry.
+- */
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+- "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
++ tcp_conn->in.ctask = session->cmds[itt];
++ rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
++ if (rc)
+ return rc;
+- }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
+- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
++
++ spin_lock(&session->lock);
++ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
++ spin_unlock(&session->lock);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
++ else if (tcp_conn->in.ctask->sc->sc_data_direction ==
++ DMA_TO_DEVICE)
++ rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
+ else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -842,24 +527,18 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+- if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: received buffer of "
+- "len %u but conn buffer is only %u "
+- "(opcode %0x)\n",
+- tcp_conn->in.datalen,
+- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
++ if (ISCSI_DEF_MAX_RECV_SEG_LEN <
++ tcp_conn->in.datalen) {
++ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
++ "but conn buffer is only %u (opcode %0x)\n",
++ tcp_conn->in.datalen,
++ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+- /* If there's data coming in with the response,
+- * receive it to the connection's buffer.
+- */
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+@@ -871,161 +550,457 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ break;
+ }
+
+- if (rc == 0) {
+- /* Anything that comes with data should have
+- * been handled above. */
+- if (tcp_conn->in.datalen)
+- return ISCSI_ERR_PROTO;
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ return rc;
++
++copy_hdr:
++ /*
++ * if we did zero copy for the header but we will need multiple
++ * skbs to complete the command then we have to copy the header
++ * for later use
++ */
++ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
++ (tcp_conn->in.datalen + tcp_conn->in.padding +
++ (conn->datadgst_en ? 4 : 0))) {
++ debug_tcp("Copying header for later use. in.copy %d in.datalen"
++ " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
++ memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->in.zero_copy_hdr = 0;
+ }
++ return 0;
++}
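
When hdrdgst_en is set, the code above recomputes a CRC32C over the BHS plus
any AHS and compares it with the four digest bytes that follow on the wire;
the driver itself gets the CRC from the kernel crypto API ("crc32c"
transform). For reference only, a self-contained bitwise version of the same
polynomial, verified against the standard check value:

#include <stdint.h>
#include <stdio.h>

/* Bitwise, reflected CRC-32C (Castagnoli), polynomial 0x82F63B78. */
static uint32_t crc32c(const void *data, size_t len)
{
    const unsigned char *p = data;
    uint32_t crc = 0xFFFFFFFFu;

    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
    }
    return crc ^ 0xFFFFFFFFu;
}

int main(void)
{
    /* standard check value for CRC-32C: crc of "123456789" is 0xE3069283 */
    uint32_t crc = crc32c("123456789", 9);

    printf("crc32c(\"123456789\") = 0x%08X (%s)\n", crc,
           crc == 0xE3069283 ? "matches check value" : "MISMATCH");
    return 0;
}
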
+
+- return rc;
++/**
++ * iscsi_ctask_copy - copy skb bits to the destination cmd task
++ * @conn: iscsi tcp connection
++ * @ctask: scsi command task
++ * @buf: buffer to copy to
++ * @buf_size: size of buffer
++ * @offset: offset within the buffer
++ *
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection and
++ * per-cmd byte counters.
++ *
++ * Read counters (in bytes):
++ *
++ * conn->in.offset offset within in progress SKB
++ * conn->in.copy left to copy from in progress SKB
++ * including padding
++ * conn->in.copied copied already from in progress SKB
++ * conn->data_copied copied already from in progress buffer
++ * ctask->sent total bytes sent up to the MidLayer
++ * ctask->data_count left to copy from in progress Data-In
++ * buf_left left to copy from in progress buffer
++ **/
++static inline int
++iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
++ void *buf, int buf_size, int offset)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int buf_left = buf_size - (tcp_conn->data_copied + offset);
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ size = min(size, ctask->data_count);
++
++ debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->in.copied);
++
++ BUG_ON(size <= 0);
++ BUG_ON(tcp_ctask->sent + size > ctask->total_length);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)buf + (offset + tcp_conn->data_copied), size);
++ /* must fit into skb->len */
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++ tcp_ctask->sent += size;
++ ctask->data_count -= size;
++
++ BUG_ON(tcp_conn->in.copy < 0);
++ BUG_ON(ctask->data_count < 0);
++
++ if (buf_size != (tcp_conn->data_copied + offset)) {
++ if (!ctask->data_count) {
++ BUG_ON(buf_size - tcp_conn->data_copied < 0);
++ /* done with this PDU */
++ return buf_size - tcp_conn->data_copied;
++ }
++ return -EAGAIN;
++ }
++
++ /* done with this buffer or with both - PDU and buffer */
++ tcp_conn->data_copied = 0;
++ return 0;
+ }
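
The heart of iscsi_ctask_copy() is a three-way clamp: a single
skb_copy_bits() may not exceed what is left in the current skb (in.copy), the
room left in the destination buffer, or the remaining Data-In byte count. A
toy model of that clamp and the counter updates (the structure and names are
invented for the sketch):

#include <stdio.h>

static int min3(int a, int b, int c)
{
    int m = a < b ? a : b;
    return m < c ? m : c;
}

struct counters {
    int in_copy;        /* bytes left in the in-progress skb      */
    int buf_left;       /* room left in the destination buffer    */
    int data_count;     /* bytes left of the in-progress Data-In  */
};

static int copy_step(struct counters *ct)
{
    int size = min3(ct->in_copy, ct->buf_left, ct->data_count);

    /* the real code calls skb_copy_bits() for 'size' bytes here */
    ct->in_copy    -= size;
    ct->buf_left   -= size;
    ct->data_count -= size;
    return size;
}

int main(void)
{
    struct counters ct = { .in_copy = 1460, .buf_left = 4096, .data_count = 512 };

    printf("copied %d bytes this pass\n", copy_step(&ct));  /* 512 */
    return 0;
}
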
+
+ /**
+- * iscsi_tcp_hdr_recv_done - process PDU header
++ * iscsi_tcp_copy - copy skb bits to the destination buffer
++ * @conn: iscsi tcp connection
+ *
+- * This is the callback invoked when the PDU header has
+- * been received. If the header is followed by additional
+- * header segments, we go back for more data.
+- */
+-static int
+-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection
++ * byte counters.
++ **/
++static inline int
++iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
+ {
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int buf_left = buf_size - tcp_conn->data_copied;
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->data_copied);
++ BUG_ON(size <= 0);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)conn->data + tcp_conn->data_copied, size);
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++
++ if (buf_size != tcp_conn->data_copied)
++ return -EAGAIN;
++
++ return 0;
++}
++
++static inline void
++partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
++ int offset, int length)
++{
++ struct scatterlist temp;
+
+- /* Check if there are additional header segments
+- * *prior* to computing the digest, because we
+- * may need to go back to the caller for more.
++ memcpy(&temp, sg, sizeof(struct scatterlist));
++ temp.offset = offset;
++ temp.length = length;
++ crypto_hash_update(desc, &temp, length);
++}
++
++static void
++iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
++{
++ struct scatterlist tmp;
++
++ sg_init_one(&tmp, buf, len);
++ crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
++}
++
++static int iscsi_scsi_data_in(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
++ struct scatterlist *sg;
++ int i, offset, rc = 0;
++
++ BUG_ON((void*)ctask != sc->SCp.ptr);
++
++ /*
++ * copying Data-In into the Scsi_Cmnd
+ */
+- hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+- if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+- /* Bump the header length - the caller will
+- * just loop around and get the AHS for us, and
+- * call again. */
+- unsigned int ahslen = hdr->hlength << 2;
+-
+- /* Make sure we don't overflow */
+- if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+- return ISCSI_ERR_AHSLEN;
+-
+- segment->total_size += ahslen;
+- segment->size += ahslen;
+- return 0;
++ if (!sc->use_sg) {
++ i = ctask->data_count;
++ rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
++ sc->request_bufflen,
++ tcp_ctask->data_offset);
++ if (rc == -EAGAIN)
++ return rc;
++ if (conn->datadgst_en)
++ iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
++ i);
++ rc = 0;
++ goto done;
+ }
+
+- /* We're done processing the header. See if we're doing
+- * header digests; if so, set up the recv_digest buffer
+- * and go back for more. */
+- if (conn->hdrdgst_en) {
+- if (segment->digest_len == 0) {
+- iscsi_tcp_segment_splice_digest(segment,
+- segment->recv_digest);
+- return 0;
++ offset = tcp_ctask->data_offset;
++ sg = sc->request_buffer;
++
++ if (tcp_ctask->data_offset)
++ for (i = 0; i < tcp_ctask->sg_count; i++)
++ offset -= sg[i].length;
++ /* we've passed through partial sg*/
++ if (offset < 0)
++ offset = 0;
++
++ for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
++ char *dest;
++
++ dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
++ rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
++ sg[i].length, offset);
++ kunmap_atomic(dest, KM_SOFTIRQ0);
++ if (rc == -EAGAIN)
++ /* continue with the next SKB/PDU */
++ return rc;
++ if (!rc) {
++ if (conn->datadgst_en) {
++ if (!offset)
++ crypto_hash_update(
++ &tcp_conn->rx_hash,
++ &sg[i], sg[i].length);
++ else
++ partial_sg_digest_update(
++ &tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset + offset,
++ sg[i].length - offset);
++ }
++ offset = 0;
++ tcp_ctask->sg_count++;
+ }
+- iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+- segment->total_copied - ISCSI_DIGEST_SIZE,
+- segment->digest);
+
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_HDR_DGST;
++ if (!ctask->data_count) {
++ if (rc && conn->datadgst_en)
++ /*
++ * data-in is complete, but buffer not...
++ */
++ partial_sg_digest_update(&tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset,
++ sg[i].length-rc);
++ rc = 0;
++ break;
++ }
++
++ if (!tcp_conn->in.copy)
++ return -EAGAIN;
+ }
++ BUG_ON(ctask->data_count);
+
+- tcp_conn->in.hdr = hdr;
+- return iscsi_tcp_hdr_dissect(conn, hdr);
++done:
++ /* check for non-exceptional status */
++ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
++ (long)sc, sc->result, ctask->itt,
++ tcp_conn->in.hdr->flags);
++ spin_lock(&conn->session->lock);
++ __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
++ spin_unlock(&conn->session->lock);
++ }
++
++ return rc;
++}
++
++static int
++iscsi_data_recv(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc = 0, opcode;
++
++ opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
++ switch (opcode) {
++ case ISCSI_OP_SCSI_DATA_IN:
++ rc = iscsi_scsi_data_in(conn);
++ break;
++ case ISCSI_OP_SCSI_CMD_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_ASYNC_EVENT:
++ case ISCSI_OP_REJECT:
++ /*
++ * Collect data segment to the connection's data
++ * placeholder
++ */
++ if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
++ rc = -EAGAIN;
++ goto exit;
++ }
++
++ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
++ tcp_conn->in.datalen);
++ if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
++ iscsi_recv_digest_update(tcp_conn, conn->data,
++ tcp_conn->in.datalen);
++ break;
++ default:
++ BUG_ON(1);
++ }
++exit:
++ return rc;
+ }
+
+ /**
+- * iscsi_tcp_recv - TCP receive in sendfile fashion
++ * iscsi_tcp_data_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ **/
+ static int
+-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+- unsigned int offset, size_t len)
++iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
++ unsigned int offset, size_t len)
+ {
++ int rc;
+ struct iscsi_conn *conn = rd_desc->arg.data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->in.segment;
+- struct skb_seq_state seq;
+- unsigned int consumed = 0;
+- int rc = 0;
++ int processed;
++ char pad[ISCSI_PAD_LEN];
++ struct scatterlist sg;
+
+- debug_tcp("in %d bytes\n", skb->len - offset);
++ /*
++ * Save current SKB and its offset in the corresponding
++ * connection context.
++ */
++ tcp_conn->in.copy = skb->len - offset;
++ tcp_conn->in.offset = offset;
++ tcp_conn->in.skb = skb;
++ tcp_conn->in.len = tcp_conn->in.copy;
++ BUG_ON(tcp_conn->in.copy <= 0);
++ debug_tcp("in %d bytes\n", tcp_conn->in.copy);
++
++more:
++ tcp_conn->in.copied = 0;
++ rc = 0;
+
+ if (unlikely(conn->suspend_rx)) {
+ debug_tcp("conn %d Rx suspended!\n", conn->id);
+ return 0;
+ }
+
+- skb_prepare_seq_read(skb, offset, skb->len, &seq);
+- while (1) {
+- unsigned int avail;
+- const u8 *ptr;
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
++ tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
++ rc = iscsi_hdr_extract(tcp_conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto nomore;
++ else {
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ }
+
+- avail = skb_seq_read(consumed, &ptr, &seq);
+- if (avail == 0) {
+- debug_tcp("no more data avail. Consumed %d\n",
+- consumed);
+- break;
++ /*
++ * Verify and process incoming PDU header.
++ */
++ rc = iscsi_tcp_hdr_recv(conn);
++ if (!rc && tcp_conn->in.datalen) {
++ if (conn->datadgst_en)
++ crypto_hash_init(&tcp_conn->rx_hash);
++ tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
++ } else if (rc) {
++ iscsi_conn_failure(conn, rc);
++ return 0;
+ }
+- BUG_ON(segment->copied >= segment->size);
+-
+- debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+- rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+- BUG_ON(rc == 0);
+- consumed += rc;
+-
+- if (segment->total_copied >= segment->total_size) {
+- debug_tcp("segment done\n");
+- rc = segment->done(tcp_conn, segment);
+- if (rc != 0) {
+- skb_abort_seq_read(&seq);
+- goto error;
+- }
++ }
++
++ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
++ uint32_t recv_digest;
+
+- /* The done() functions sets up the
+- * next segment. */
++ debug_tcp("extra data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++ rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++
++ memcpy(&recv_digest, conn->data, sizeof(uint32_t));
++ if (recv_digest != tcp_conn->in.datadgst) {
++ debug_tcp("iscsi_tcp: data digest error!"
++ "0x%x != 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
++ return 0;
++ } else {
++ debug_tcp("iscsi_tcp: data digest match!"
++ "0x%x == 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+ }
+- skb_abort_seq_read(&seq);
+- conn->rxdata_octets += consumed;
+- return consumed;
+
+-error:
+- debug_tcp("Error receiving PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return 0;
++ if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
++ tcp_conn->in.copy) {
++
++ debug_tcp("data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++
++ rc = iscsi_data_recv(conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ tcp_conn->in.copy -= tcp_conn->in.padding;
++ tcp_conn->in.offset += tcp_conn->in.padding;
++ if (conn->datadgst_en) {
++ if (tcp_conn->in.padding) {
++ debug_tcp("padding -> %d\n",
++ tcp_conn->in.padding);
++ memset(pad, 0, tcp_conn->in.padding);
++ sg_init_one(&sg, pad, tcp_conn->in.padding);
++ crypto_hash_update(&tcp_conn->rx_hash,
++ &sg, sg.length);
++ }
++ crypto_hash_final(&tcp_conn->rx_hash,
++ (u8 *) &tcp_conn->in.datadgst);
++ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
++ tcp_conn->data_copied = 0;
++ } else
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ }
++
++ debug_tcp("f, processed %d from out of %d padding %d\n",
++ tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
++ BUG_ON(tcp_conn->in.offset - offset > len);
++
++ if (tcp_conn->in.offset - offset != len) {
++ debug_tcp("continue to process %d bytes\n",
++ (int)len - (tcp_conn->in.offset - offset));
++ goto more;
++ }
++
++nomore:
++ processed = tcp_conn->in.offset - offset;
++ BUG_ON(processed == 0);
++ return processed;
++
++again:
++ processed = tcp_conn->in.offset - offset;
++ debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
++ processed, (int)len, (int)rd_desc->count);
++ BUG_ON(processed == 0);
++ BUG_ON(processed > len);
++
++ conn->rxdata_octets += processed;
++ return processed;
+ }
+
+ static void
+ iscsi_tcp_data_ready(struct sock *sk, int flag)
+ {
+ struct iscsi_conn *conn = sk->sk_user_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+
+ /*
+- * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
++ * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+ * We set count to 1 because we want the network layer to
+- * hand us all the skbs that are available. iscsi_tcp_recv
++ * hand us all the skbs that are available. iscsi_tcp_data_recv
+ * handled pdus that cross buffers or pdus that still need data.
+ */
+ rd_desc.arg.data = conn;
+ rd_desc.count = 1;
+- tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
++ tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+
+ read_unlock(&sk->sk_callback_lock);
+-
+- /* If we had to (atomically) map a highmem page,
+- * unmap it now. */
+- iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ }
+
+ static void
+@@ -1105,179 +1080,127 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+ }
+
+ /**
+- * iscsi_xmit - TCP transmit
+- **/
+-static int
+-iscsi_xmit(struct iscsi_conn *conn)
++ * iscsi_send - generic send routine
++ * @sk: kernel's socket
++ * @buf: buffer to write from
++ * @size: actual size to write
++ * @flags: socket's flags
++ */
++static inline int
++iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+- unsigned int consumed = 0;
+- int rc = 0;
+-
+- while (1) {
+- rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- if (rc == 0)
+- break;
+-
+- consumed += rc;
++ struct socket *sk = tcp_conn->sock;
++ int offset = buf->sg.offset + buf->sent, res;
+
+- if (segment->total_copied >= segment->total_size) {
+- if (segment->done != NULL) {
+- rc = segment->done(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- }
+- }
++ /*
++ * if we got use_sg=0 or are sending something we kmallocd
++ * then we did not have to do kmap (kmap returns page_address)
++ *
++ * if we got use_sg > 0, but had to drop down, we do not
++ * set clustering so this should only happen for that
++ * slab case.
++ */
++ if (buf->use_sendmsg)
++ res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
++ else
++ res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
++
++ if (res >= 0) {
++ conn->txdata_octets += res;
++ buf->sent += res;
++ return res;
+ }
+
+- debug_tcp("xmit %d bytes\n", consumed);
+-
+- conn->txdata_octets += consumed;
+- return consumed;
+-
+-error:
+- /* Transmit error. We could initiate error recovery
+- * here. */
+- debug_tcp("Error sending PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return rc;
++ tcp_conn->sendpage_failures_cnt++;
++ if (res == -EAGAIN)
++ res = -ENOBUFS;
++ else
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return res;
+ }
+
+ /**
+- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+- */
+-static inline int
+-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+-
+- return segment->total_copied - segment->total_size;
+-}
+-
++ * iscsi_sendhdr - send PDU Header via tcp_sendpage()
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @datalen: length of data to be sent after the header
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
+ static inline int
+-iscsi_tcp_flush(struct iscsi_conn *conn)
++iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+ {
+- int rc;
+-
+- while (iscsi_tcp_xmit_qlen(conn)) {
+- rc = iscsi_xmit(conn);
+- if (rc == 0)
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (buf->sent + size != buf->sg.length || datalen)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
++ if (res >= 0) {
++ if (size != res)
+ return -EAGAIN;
+- if (rc < 0)
+- return rc;
++ return 0;
+ }
+
+- return 0;
+-}
+-
+-/*
+- * This is called when we're done sending the header.
+- * Simply copy the data_segment to the send segment, and return.
+- */
+-static int
+-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- tcp_conn->out.segment = tcp_conn->out.data_segment;
+- debug_tcp("Header done. Next segment size %u total_size %u\n",
+- tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+- return 0;
++ return res;
+ }
+
+-static void
+-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
++/**
++ * iscsi_sendpage - send one page of iSCSI Data-Out.
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @count: remaining data
++ * @sent: number of bytes sent
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
++static inline int
++iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ int *count, int *sent)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
+- conn->hdrdgst_en? ", digest enabled" : "");
+-
+- /* Clear the data segment - needs to be filled in by the
+- * caller using iscsi_tcp_send_data_prep() */
+- memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+-
+- /* If header digest is enabled, compute the CRC and
+- * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
+- * sufficient room.
+- */
+- if (conn->hdrdgst_en) {
+- iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+- hdr + hdrlen);
+- hdrlen += ISCSI_DIGEST_SIZE;
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (size > *count)
++ size = *count;
++ if (buf->sent + size != buf->sg.length || *count != size)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
++ size, buf->sent, *count, *sent, res);
++ if (res >= 0) {
++ *count -= res;
++ *sent += res;
++ if (size != res)
++ return -EAGAIN;
++ return 0;
+ }
+
+- /* Remember header pointer for later, when we need
+- * to decide whether there's a payload to go along
+- * with the header. */
+- tcp_conn->out.hdr = hdr;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+- iscsi_tcp_send_hdr_done, NULL);
++ return res;
+ }
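
Both send helpers above share one contract: advance buf->sent by whatever the
socket accepted and report -EAGAIN on a short write so the caller re-enters
later with the same state. A userspace model of that partial-send loop;
fake_send() merely stands in for tcp_sendpage()/sendmsg() and is invented for
the example:

#include <errno.h>
#include <stdio.h>

struct buf {
    const char *data;
    int length;
    int sent;            /* how much of 'data' has gone out so far */
};

/* pretend transport that accepts at most 'window' bytes per call */
static int fake_send(const char *p, int len, int window)
{
    (void)p;
    return len < window ? len : window;
}

static int send_step(struct buf *b, int window)
{
    int size = b->length - b->sent;
    int res = fake_send(b->data + b->sent, size, window);

    b->sent += res;
    if (res != size)
        return -EAGAIN;  /* caller must re-invoke when the socket drains */
    return 0;
}

int main(void)
{
    struct buf b = { .data = "iscsi-pdu-header-bytes", .length = 22, .sent = 0 };

    while (send_step(&b, 8) == -EAGAIN)
        printf("partial send, %d/%d done\n", b.sent, b.length);
    printf("done, %d/%d\n", b.sent, b.length);
    return 0;
}
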
+
+-/*
+- * Prepare the send buffer for the payload data.
+- * Padding and checksumming will all be taken care
+- * of by the iscsi_segment routines.
+- */
+-static int
+-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+- unsigned int count, unsigned int offset,
+- unsigned int len)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
+- tcp_conn, offset, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+- sg, count, offset, len,
+- NULL, tx_hash);
+-}
+-
+-static void
+-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+- size_t len)
++static inline void
++iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+- data, len, NULL, tx_hash);
++ crypto_hash_init(&tcp_conn->tx_hash);
++ tcp_ctask->digest_count = 4;
+ }
+
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1287,17 +1210,13 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ *
+ * Called under connection lock.
+ **/
+-static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+- struct iscsi_r2t_info *r2t)
++static void
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_r2t_info *r2t, int left)
+ {
+ struct iscsi_data *hdr;
+- int new_offset, left;
+-
+- BUG_ON(r2t->data_length - r2t->sent < 0);
+- left = r2t->data_length - r2t->sent;
+- if (left == 0)
+- return 0;
++ struct scsi_cmnd *sc = ctask->sc;
++ int new_offset;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -1305,8 +1224,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1318,177 +1237,514 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ r2t->data_count = left;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+-
+ conn->dataout_pdus_cnt++;
+- return 1;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (iscsi_buf_left(&r2t->sendbuf))
++ return;
++
++ if (sc->use_sg) {
++ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
++ r2t->sg += 1;
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + new_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
++}
++
++static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
++ unsigned long len)
++{
++ tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
++ if (!tcp_ctask->pad_count)
++ return;
++
++ tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
++ debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
++ tcp_ctask->xmstate |= XMSTATE_W_PAD;
+ }
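
iscsi_set_padding() above and the Rx-side padding calculation in
iscsi_tcp_hdr_recv() use the same arithmetic: PDU segments are padded to a
4-byte (ISCSI_PAD_LEN) boundary. A quick standalone check of that formula:

#include <assert.h>
#include <stdio.h>

#define ISCSI_PAD_LEN 4

static unsigned int pad_bytes(unsigned long len)
{
    unsigned int pad = len & (ISCSI_PAD_LEN - 1);

    return pad ? ISCSI_PAD_LEN - pad : 0;
}

int main(void)
{
    assert(pad_bytes(512) == 0);
    assert(pad_bytes(513) == 3);
    assert(pad_bytes(514) == 2);
    assert(pad_bytes(515) == 1);
    printf("pad(1..8): ");
    for (unsigned long l = 1; l <= 8; l++)
        printf("%u ", pad_bytes(l));
    printf("\n");
    return 0;
}
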
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
++static void
++iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
++{
++ struct scsi_cmnd *sc = ctask->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++
++ tcp_ctask->sent = 0;
++ tcp_ctask->sg_count = 0;
++
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ tcp_ctask->xmstate = XMSTATE_W_HDR;
++ tcp_ctask->exp_r2tsn = 0;
++ BUG_ON(ctask->total_length == 0);
++
++ if (sc->use_sg) {
++ struct scatterlist *sg = sc->request_buffer;
++
++ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
++ tcp_ctask->sg = sg + 1;
++ tcp_ctask->bad_sg = sg + sc->use_sg;
++ } else {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf,
++ sc->request_buffer,
++ sc->request_bufflen);
++ tcp_ctask->sg = NULL;
++ tcp_ctask->bad_sg = NULL;
++ }
++ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
++ "unsol count %d, unsol offset %d]\n",
++ ctask->itt, ctask->total_length, ctask->imm_count,
++ ctask->unsol_count, ctask->unsol_offset);
++ } else
++ tcp_ctask->xmstate = XMSTATE_R_HDR;
++
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
++ sizeof(struct iscsi_hdr));
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ *
++ * Management xmit state machine consists of two states:
++ * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
++ * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
++ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
+- int err;
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++ int rc;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
++ debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
++ conn->id, tcp_mtask->xmstate, mtask->itt);
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
++ conn->stop_stage != STOP_CONN_RECOVER &&
++ conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
++ (u8*)tcp_mtask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
++ mtask->data_count);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ return rc;
++ }
++ }
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
++ BUG_ON(!mtask->data_count);
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ /* FIXME: implement.
++		 * Virtual buffer could be spread across multiple pages...
+ */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
++ do {
++ int rc;
++
++ rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
++ &mtask->data_count, &tcp_mtask->sent);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ return rc;
++ }
++ } while (mtask->data_count);
++ }
+
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
++ BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
+
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
++ spin_lock_bh(&session->lock);
++ list_del(&conn->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
++ sizeof(void*));
++ spin_unlock_bh(&session->lock);
++ }
++ return 0;
++}
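
The Tx routines in this backport are resumable: an XMSTATE_* bit is cleared
before each attempt and set again on failure, so the next call picks up
exactly where the previous one returned -EAGAIN. A tiny illustration of that
pattern with made-up flag names and a fake transport:

#include <stdio.h>

#define ST_HDR   (1 << 0)
#define ST_DATA  (1 << 1)

static int try_send(int *budget)          /* pretend transport */
{
    if (*budget <= 0)
        return -1;                        /* would block, like -EAGAIN */
    (*budget)--;
    return 0;
}

static int xmit(unsigned *xmstate, int *budget)
{
    if (*xmstate & ST_HDR) {
        *xmstate &= ~ST_HDR;
        if (try_send(budget)) {
            *xmstate |= ST_HDR;           /* retry header next time */
            return -1;
        }
    }
    if (*xmstate & ST_DATA) {
        *xmstate &= ~ST_DATA;
        if (try_send(budget)) {
            *xmstate |= ST_DATA;          /* retry data next time */
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    unsigned xmstate = ST_HDR | ST_DATA;
    int budget = 1;                       /* only one send fits this pass */
    int rc = xmit(&xmstate, &budget);

    printf("pass 1: rc %d, xmstate now %#x\n", rc, xmstate);  /* -1, 0x2 */
    budget = 1;
    rc = xmit(&xmstate, &budget);
    printf("pass 2: rc %d, xmstate now %#x\n", rc, xmstate);  /* 0, 0 */
    return 0;
}
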
++
++static inline int
++iscsi_send_read_hdr(struct iscsi_conn *conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
++{
++ int rc;
++
++ tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
++ if (!rc) {
++ BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
++ return 0; /* wait for Data-In */
+ }
++ tcp_ctask->xmstate |= XMSTATE_R_HDR;
++ return rc;
++}
+
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++static inline int
++iscsi_send_write_hdr(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
++ if (rc) {
++ tcp_ctask->xmstate |= XMSTATE_W_HDR;
++ return rc;
++ }
+
+- if (!task->imm_count)
+- return 0;
++ if (ctask->imm_count) {
++ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
++ iscsi_set_padding(tcp_ctask, ctask->imm_count);
+
+- /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
+- if (err)
+- return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ if (ctask->conn->datadgst_en) {
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ tcp_ctask->immdigest = 0;
++ }
++ }
++
++ if (ctask->unsol_count)
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ return 0;
+ }
+
+-/*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
+- *
+- * We're expected to return 0 when everything was transmitted successfully,
+- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+- * of error.
+- */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
+- int rc = 0;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int sent = 0, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
++ tcp_ctask->pad_count);
++ if (conn->datadgst_en)
++ crypto_hash_update(&tcp_conn->tx_hash,
++ &tcp_ctask->sendbuf.sg,
++ tcp_ctask->sendbuf.sg.length);
++ } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
++ return 0;
+
+-flush:
+- /* Flush any pending data first. */
+- rc = iscsi_tcp_flush(conn);
+- if (rc < 0)
+- return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
++ debug_scsi("sending %d pad bytes for itt 0x%x\n",
++ tcp_ctask->pad_count, ctask->itt);
++ rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
++ &sent);
++ if (rc) {
++ debug_scsi("padding send failed %d\n", rc);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
++ }
++ return rc;
++}
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++static int
++iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_buf *buf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask;
++ struct iscsi_tcp_conn *tcp_conn;
++ int rc, sent = 0;
++
++ if (!conn->datadgst_en)
+ return 0;
++
++ tcp_ctask = ctask->dd_data;
++ tcp_conn = conn->dd_data;
++
++ if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
++ crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
++ iscsi_buf_init_iov(buf, (char*)digest, 4);
+ }
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
++
++ rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
++ if (!rc)
++ debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
++ ctask->itt);
++ else {
++ debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
++ *digest, ctask->itt);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
++ }
++ return rc;
++}
+
+- /* Are we done already? */
+- if (sc->sc_data_direction != DMA_TO_DEVICE)
+- return 0;
++static int
++iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
++ struct scatterlist **sg, int *sent, int *count,
++ struct iscsi_buf *digestbuf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc, buf_sent, offset;
++
++ while (*count) {
++ buf_sent = 0;
++ offset = sendbuf->sent;
++
++ rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
++ *sent = *sent + buf_sent;
++ if (buf_sent && conn->datadgst_en)
++ partial_sg_digest_update(&tcp_conn->tx_hash,
++ &sendbuf->sg, sendbuf->sg.offset + offset,
++ buf_sent);
++ if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
++ iscsi_buf_init_sg(sendbuf, *sg);
++ *sg = *sg + 1;
++ }
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (rc)
++ return rc;
++ }
+
+- /* Prepare a header for the unsolicited PDU.
+- * The amount of data we want to send will be
+- * in task->data_count.
+- * FIXME: return the data count instead.
+- */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ rc = iscsi_send_padding(conn, ctask);
++ if (rc)
++ return rc;
++
++ return iscsi_send_digest(conn, ctask, digestbuf, digest);
++}
++
++static int
++iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_data_task *dtask;
++ int rc;
++
++ tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
++ if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
++ dtask = &tcp_ctask->unsol_dtask;
++
++ iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
++ sizeof(struct iscsi_hdr));
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)dtask->hdrext);
++
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
++ iscsi_set_padding(tcp_ctask, ctask->data_count);
++ }
++
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
++ return rc;
++ }
++
++ if (conn->datadgst_en) {
++ dtask = &tcp_ctask->unsol_dtask;
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
++ }
++
++ debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
++ ctask->itt, ctask->unsol_count, tcp_ctask->sent);
++ return 0;
++}
+
+- debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++static int
++iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
++ BUG_ON(!ctask->unsol_count);
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
++send_hdr:
++ rc = iscsi_send_unsol_hdr(conn, ctask);
+ if (rc)
+- goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
+- goto flush;
+- } else {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_r2t_info *r2t;
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
++ struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
++ int start = tcp_ctask->sent;
+
+- /* All unsolicited PDUs sent. Check for solicited PDUs.
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->data_count,
++ &dtask->digestbuf, &dtask->digest);
++ ctask->unsol_count -= tcp_ctask->sent - start;
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ /*
++ * Done with the Data-Out. Next, check if we need
++ * to send another unsolicited Data-Out.
+ */
+- spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
+- debug_scsi(" done with r2t %p\n", r2t);
+-
+- __kfifo_put(tcp_task->r2tpool.queue,
+- (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
+- }
++ if (ctask->unsol_count) {
++ debug_scsi("sending more uns\n");
++ tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
++ goto send_hdr;
+ }
++ }
++ return 0;
++}
+
+- if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_r2t_info *r2t;
++ struct iscsi_data_task *dtask;
++ int left, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ if (!tcp_ctask->r2t) {
++ spin_lock_bh(&session->lock);
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ spin_unlock_bh(&session->lock);
++ }
++send_hdr:
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
++
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &r2t->headbuf,
++ (u8*)dtask->hdrext);
++ rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ return rc;
+ }
+- spin_unlock_bh(&session->lock);
+
+- /* Waiting for more R2Ts to arrive. */
+- if (r2t == NULL) {
+- debug_tcp("no R2Ts yet\n");
+- return 0;
++ if (conn->datadgst_en) {
++ iscsi_data_digest_init(conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
+ }
+
+- debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
+- r2t->data_offset + r2t->sent, r2t->data_count);
++ iscsi_set_padding(tcp_ctask, r2t->data_count);
++ debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
++ r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
++ r2t->sent);
++ }
+
+- iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+- sizeof(struct iscsi_hdr));
++ if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
+- r2t->data_offset + r2t->sent,
+- r2t->data_count);
++ rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
++ &r2t->sent, &r2t->data_count,
++ &dtask->digestbuf, &dtask->digest);
+ if (rc)
+- goto fail;
+- tcp_task->sent += r2t->data_count;
+- r2t->sent += r2t->data_count;
+- goto flush;
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++
++ /*
++ * Done with this Data-Out. Next, check if we have
++ * to send another Data-Out for this R2T.
++ */
++ BUG_ON(r2t->data_length - r2t->sent < 0);
++ left = r2t->data_length - r2t->sent;
++ if (left) {
++ iscsi_solicit_data_cont(conn, ctask, r2t, left);
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ goto send_hdr;
++ }
++
++ /*
++ * Done with this R2T. Check if there are more
++ * outstanding R2Ts ready to be processed.
++ */
++ spin_lock_bh(&session->lock);
++ tcp_ctask->r2t = NULL;
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
++ sizeof(void*));
++ if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
++ sizeof(void*))) {
++ tcp_ctask->r2t = r2t;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ spin_unlock_bh(&session->lock);
++ goto send_hdr;
++ }
++ spin_unlock_bh(&session->lock);
+ }
+ return 0;
+-fail:
+- iscsi_conn_failure(conn, rc);
+- return -EIO;
++}
++
++static int
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc = 0;
++
++ debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
++ conn->id, tcp_ctask->xmstate, ctask->itt);
++
++ /*
++ * serialize with TMF AbortTask
++ */
++ if (ctask->mtask)
++ return rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_R_HDR)
++ return iscsi_send_read_hdr(conn, tcp_ctask);
++
++ if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
++ rc = iscsi_send_write_hdr(conn, ctask);
++ if (rc)
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->imm_count,
++ &tcp_ctask->immbuf, &tcp_ctask->immdigest);
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
++ }
++
++ rc = iscsi_send_unsol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ rc = iscsi_send_sol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ return rc;
+ }
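+
The rewritten transmit path above is sequenced by the per-task xmstate bitmask: iscsi_tcp_ctask_xmit() checks XMSTATE_R_HDR/XMSTATE_W_HDR first, then XMSTATE_IMM_DATA, then the unsolicited and solicited stages, and each helper re-arms its bit when iscsi_sendhdr()/iscsi_sendpage() cannot push everything onto the socket, so the next run of the xmit worker resumes at the same stage. Below is a small user-space sketch of that resume-after-partial-send pattern; all names in it are invented for illustration and are not part of the patch (the patch clears each bit up front and sets it again on failure, which has the same effect as only clearing a bit once its stage has gone out in full).

/*
 * Illustrative sketch only (not part of the patch): a bitmask-driven
 * transmit state machine that resumes after partial sends.  xmit_stage()
 * stands in for iscsi_sendhdr()/iscsi_sendpage() and "fails" with -EAGAIN
 * when the pretend socket runs out of space for this pass.
 */
#include <stdio.h>

#define ST_W_HDR     0x01    /* write request header still to send */
#define ST_IMM_DATA  0x02    /* immediate data still to send       */
#define ST_UNS_DATA  0x04    /* unsolicited Data-Out still to send */

static int budget;           /* bytes the "socket" accepts per pass */

static int xmit_stage(const char *name, int *remaining)
{
	int chunk = *remaining < budget ? *remaining : budget;

	budget -= chunk;
	*remaining -= chunk;
	printf("%-6s sent %4d, %4d left\n", name, chunk, *remaining);
	return *remaining ? -11 /* -EAGAIN */ : 0;
}

static int ctask_xmit(unsigned *xmstate, int *hdr, int *imm, int *uns)
{
	int rc;

	if (*xmstate & ST_W_HDR) {
		rc = xmit_stage("hdr", hdr);
		if (rc)
			return rc;        /* bit stays set: resume here next time */
		*xmstate &= ~ST_W_HDR;
	}
	if (*xmstate & ST_IMM_DATA) {
		rc = xmit_stage("imm", imm);
		if (rc)
			return rc;
		*xmstate &= ~ST_IMM_DATA;
	}
	if (*xmstate & ST_UNS_DATA) {
		rc = xmit_stage("unsol", uns);
		if (rc)
			return rc;
		*xmstate &= ~ST_UNS_DATA;
	}
	return 0;
}

int main(void)
{
	unsigned xmstate = ST_W_HDR | ST_IMM_DATA | ST_UNS_DATA;
	int hdr = 48, imm = 512, uns = 1024;

	do {
		budget = 600;             /* fresh socket space each xmit pass */
		printf("-- xmit pass, xmstate 0x%x --\n", xmstate);
	} while (ctask_xmit(&xmstate, &hdr, &imm, &uns) == -11);

	return 0;
}
+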
+
+ static struct iscsi_cls_conn *
+@@ -1498,7 +1754,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,31 +1764,45 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ /* initial operational parameters */
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ if (IS_ERR(tcp_conn->tx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->tx_hash.tfm));
++ goto free_tcp_conn;
++ }
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->rx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->rx_hash.tfm))
++ if (IS_ERR(tcp_conn->rx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->rx_hash.tfm));
+ goto free_tx_tfm;
++ }
+
+ return cls_conn;
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Could not create connection due to crc32c "
+- "loading error. Make sure the crc32c "
+- "module is built as a module or into the "
+- "kernel\n");
++free_tcp_conn:
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1540,21 +1810,18 @@ free_conn:
+ static void
+ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct socket *sock = tcp_conn->sock;
+
+- if (!sock)
++ if (!tcp_conn->sock)
+ return;
+
+- sock_hold(sock->sk);
++ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+- sock_put(sock->sk);
++ sock_put(tcp_conn->sock->sk);
+
+- spin_lock_bh(&session->lock);
++ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+- spin_unlock_bh(&session->lock);
+- sockfd_put(sock);
++ conn->recv_lock = NULL;
+ }
+
+ static void
+@@ -1564,13 +1831,14 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+@@ -1579,60 +1847,9 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+-
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+-}
+-
+-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+- char *buf, int *port,
+- int (*getname)(struct socket *, struct sockaddr *,
+- int *addrlen))
+-{
+- struct sockaddr_storage *addr;
+- struct sockaddr_in6 *sin6;
+- struct sockaddr_in *sin;
+- int rc = 0, len;
+-
+- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+- if (!addr)
+- return -ENOMEM;
+-
+- if (getname(sock, (struct sockaddr *) addr, &len)) {
+- rc = -ENODEV;
+- goto free_addr;
+- }
+-
+- switch (addr->ss_family) {
+- case AF_INET:
+- sin = (struct sockaddr_in *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+- *port = be16_to_cpu(sin->sin_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- case AF_INET6:
+- sin6 = (struct sockaddr_in6 *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+- *port = be16_to_cpu(sin6->sin6_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- }
+-free_addr:
+- kfree(addr);
+- return rc;
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+ }
+
+ static int
+@@ -1640,8 +1857,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1651,28 +1866,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ /* lookup for existing socket */
+ sock = sockfd_lookup((int)transport_eph, &err);
+ if (!sock) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "sockfd_lookup failed %d\n", err);
++ printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
+ return -EEXIST;
+ }
+- /*
+- * copy these values now because if we drop the session
+- * userspace may still want to query the values since we will
+- * be using them for the reconnect
+- */
+- err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+- &conn->portal_port, kernel_getpeername);
+- if (err)
+- goto free_socket;
+-
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
+- if (err)
+- goto free_socket;
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+- goto free_socket;
++ return err;
+
+ /* bind iSCSI connection and socket */
+ tcp_conn->sock = sock;
+@@ -1683,17 +1883,38 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+ * set receive state machine into initial state
+ */
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++
+ return 0;
++}
+
+-free_socket:
+- sockfd_put(sock);
+- return err;
++/* called with host lock */
++static void
++iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_mtask->xmstate = XMSTATE_IMM_HDR;
++ tcp_mtask->sent = 0;
++
++ if (mtask->data_count)
++ iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
++ mtask->data_count);
+ }
+
+ static int
+@@ -1706,8 +1927,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1937,18 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
++ (void***)&tcp_ctask->r2ts,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1957,12 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1973,12 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ }
+
+@@ -1769,6 +1994,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ switch(param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
++ if (conn->hdrdgst_en)
++ tcp_conn->hdr_size += sizeof(__u32);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+@@ -1777,12 +2005,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%d", &value);
+- if (value <= 0 || !is_power_of_2(value))
+- return -EINVAL;
+- if (session->max_r2t == value)
++ if (session->max_r2t == roundup_pow_of_two(value))
+ break;
+ iscsi_r2tpool_free(session);
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ if (session->max_r2t & (session->max_r2t - 1))
++ session->max_r2t = roundup_pow_of_two(session->max_r2t);
+ if (iscsi_r2tpool_alloc(session))
+ return -ENOMEM;
+ break;
+@@ -1798,18 +2026,41 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct inet_sock *inet;
++ struct ipv6_pinfo *np;
++ struct sock *sk;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_CONN_PORT:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%hu\n", conn->portal_port);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ inet = inet_sk(tcp_conn->sock->sk);
++ len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%s\n", conn->portal_address);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ sk = tcp_conn->sock->sk;
++ if (sk->sk_family == PF_INET) {
++ inet = inet_sk(sk);
++ len = sprintf(buf, NIPQUAD_FMT "\n",
++ NIPQUAD(inet->daddr));
++ } else {
++ np = inet6_sk(sk);
++ len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
++ }
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+@@ -1843,93 +2094,65 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ ctask->hdr = &tcp_ctask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ mtask->hdr = &tcp_mtask->hdr;
++ }
++
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
+-
+-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+-{
+- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
+- blk_queue_dma_alignment(sdev->request_queue, 0);
+- return 0;
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static struct scsi_host_template iscsi_sht = {
+- .module = THIS_MODULE,
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+- .sg_tablesize = 4096,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
++ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+- .slave_configure = iscsi_tcp_slave_configure,
+ .proc_name = "iscsi_tcp",
+ .this_id = -1,
+ };
+@@ -1956,16 +2179,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+- ISCSI_HOST_INITIATOR_NAME |
+- ISCSI_HOST_NETDEV_NAME,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1978,15 +2197,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_cmd_init,
++ .init_mgmt_task = iscsi_tcp_mgmt_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2217,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..3273683 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -24,61 +24,68 @@
+
+ #include <scsi/libiscsi.h>
+
++/* Socket's Receive state machine */
++#define IN_PROGRESS_WAIT_HEADER 0x0
++#define IN_PROGRESS_HEADER_GATHER 0x1
++#define IN_PROGRESS_DATA_RECV 0x2
++#define IN_PROGRESS_DDIGEST_RECV 0x3
++
++/* xmit state machine */
++#define XMSTATE_IDLE 0x0
++#define XMSTATE_R_HDR 0x1
++#define XMSTATE_W_HDR 0x2
++#define XMSTATE_IMM_HDR 0x4
++#define XMSTATE_IMM_DATA 0x8
++#define XMSTATE_UNS_INIT 0x10
++#define XMSTATE_UNS_HDR 0x20
++#define XMSTATE_UNS_DATA 0x40
++#define XMSTATE_SOL_HDR 0x80
++#define XMSTATE_SOL_DATA 0x100
++#define XMSTATE_W_PAD 0x200
++#define XMSTATE_W_RESEND_PAD 0x400
++#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
++
++#define ISCSI_PAD_LEN 4
++#define ISCSI_SG_TABLESIZE SG_ALL
++#define ISCSI_TCP_MAX_CMD_LEN 16
++
+ struct crypto_hash;
+ struct socket;
+-struct iscsi_tcp_conn;
+-struct iscsi_segment;
+-
+-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+- struct iscsi_segment *);
+-
+-struct iscsi_segment {
+- unsigned char *data;
+- unsigned int size;
+- unsigned int copied;
+- unsigned int total_size;
+- unsigned int total_copied;
+-
+- struct hash_desc *hash;
+- unsigned char recv_digest[ISCSI_DIGEST_SIZE];
+- unsigned char digest[ISCSI_DIGEST_SIZE];
+- unsigned int digest_len;
+-
+- struct scatterlist *sg;
+- void *sg_mapped;
+- unsigned int sg_offset;
+-
+- iscsi_segment_done_fn_t *done;
+-};
+
+ /* Socket connection recieve helper */
+ struct iscsi_tcp_recv {
+ struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+-
+- /* Allocate buffer for BHS + AHS */
+- uint32_t hdr_buf[64];
++ struct sk_buff *skb;
++ int offset;
++ int len;
++ int hdr_offset;
++ int copy;
++ int copied;
++ int padding;
++ struct iscsi_cmd_task *ctask; /* current cmd in progress */
+
+ /* copied and flipped values */
+ int datalen;
+-};
+-
+-/* Socket connection send helper */
+-struct iscsi_tcp_send {
+- struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+- struct iscsi_segment data_segment;
++ int datadgst;
++ char zero_copy_hdr;
+ };
+
+ struct iscsi_tcp_conn {
+ struct iscsi_conn *iscsi_conn;
+ struct socket *sock;
++ struct iscsi_hdr hdr; /* header placeholder */
++ char hdrext[4*sizeof(__u16) +
++ sizeof(__u32)];
++ int data_copied;
+ int stop_stage; /* conn_stop() flag: *
+ * stop to recover, *
+ * stop to terminate */
++ /* iSCSI connection-wide sequencing */
++ int hdr_size; /* PDU header size */
++
+ /* control data */
+ struct iscsi_tcp_recv in; /* TCP receive context */
+- struct iscsi_tcp_send out; /* TCP send context */
++ int in_progress; /* connection state machine */
+
+ /* old values for socket callbacks */
+ void (*old_data_ready)(struct sock *, int);
+@@ -93,14 +100,29 @@ struct iscsi_tcp_conn {
+ uint32_t sendpage_failures_cnt;
+ uint32_t discontiguous_hdr_cnt;
+
+- int error;
+-
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ };
+
++struct iscsi_buf {
++ struct scatterlist sg;
++ unsigned int sent;
++ char use_sendmsg;
++};
++
+ struct iscsi_data_task {
+ struct iscsi_data hdr; /* PDU */
+- char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ struct iscsi_buf digestbuf; /* digest buffer */
++ uint32_t digest; /* data digest */
++};
++
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ int xmstate; /* mgmt xmit progress */
++ struct iscsi_buf headbuf; /* header buffer */
++ struct iscsi_buf sendbuf; /* in progress buffer */
++ int sent;
+ };
+
+ struct iscsi_r2t_info {
+@@ -108,26 +130,38 @@ struct iscsi_r2t_info {
+ __be32 exp_statsn; /* copied from R2T */
+ uint32_t data_length; /* copied from R2T */
+ uint32_t data_offset; /* copied from R2T */
++ struct iscsi_buf headbuf; /* Data-Out Header Buffer */
++ struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
+ int sent; /* R2T sequence progress */
+ int data_count; /* DATA-Out payload progress */
++ struct scatterlist *sg; /* per-R2T SG list */
+ int solicit_datasn;
+- struct iscsi_data_task dtask; /* Data-Out header buf */
++ struct iscsi_data_task dtask; /* which data task */
+ };
+
+-struct iscsi_tcp_task {
+- struct iscsi_hdr_buff {
+- struct iscsi_cmd cmd_hdr;
+- char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+- ISCSI_DIGEST_SIZE];
+- } hdr;
+-
++struct iscsi_tcp_cmd_task {
++ struct iscsi_cmd hdr;
++ char hdrext[4*sizeof(__u16)+ /* AHS */
++ sizeof(__u32)]; /* HeaderDigest */
++ char pad[ISCSI_PAD_LEN];
++ int pad_count; /* padded bytes */
++ struct iscsi_buf headbuf; /* header buf (xmit) */
++ struct iscsi_buf sendbuf; /* in progress buffer*/
++ int xmstate; /* xmit state machine */
+ int sent;
+- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
++ struct scatterlist *sg; /* per-cmd SG list */
++ struct scatterlist *bad_sg; /* assert statement */
++ int sg_count; /* SG's to process */
++ uint32_t exp_r2tsn;
+ int data_offset;
+- struct iscsi_r2t_info *r2t; /* in progress R2T */
+- struct iscsi_pool r2tpool;
++ struct iscsi_r2t_info *r2t; /* in progress R2T */
++ struct iscsi_queue r2tpool;
+ struct kfifo *r2tqueue;
+- struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
++ struct iscsi_r2t_info **r2ts;
++ int digest_count;
++ uint32_t immdigest; /* for imm data */
++ struct iscsi_buf immbuf; /* for imm data digest */
++ struct iscsi_data_task unsol_dtask; /* unsol data task */
+ };
+
+ #endif /* ISCSI_H */
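+
The header above restores the pre-2.6.26 driver-private state: the XMSTATE_* transmit flags, the IN_PROGRESS_* receive states, and the pad/pad_count fields sized by ISCSI_PAD_LEN, since iSCSI data segments are padded out to a 4-byte boundary and the pad bytes are fed into the same CRC32C data digest as the payload (see iscsi_send_padding() earlier in the patch). A minimal sketch of that pad computation follows; the helper name is invented for illustration.

/* Illustration only: data segment length -> number of zero pad bytes (0..3). */
#include <assert.h>
#include <stdio.h>

#define ISCSI_PAD_LEN 4

static unsigned int iscsi_pad_bytes(unsigned int dlen)
{
	/* same as (ISCSI_PAD_LEN - dlen % ISCSI_PAD_LEN) % ISCSI_PAD_LEN */
	return (ISCSI_PAD_LEN - (dlen & (ISCSI_PAD_LEN - 1))) &
	       (ISCSI_PAD_LEN - 1);
}

int main(void)
{
	unsigned int lens[] = { 0, 1, 2, 3, 4, 510, 512, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int pad = iscsi_pad_bytes(lens[i]);

		assert((lens[i] + pad) % ISCSI_PAD_LEN == 0);
		printf("dlen %5u -> pad %u\n", lens[i], pad);
	}
	return 0;
}
+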
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..3f5b9b4 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -22,9 +22,9 @@
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+ #include <linux/types.h>
++#include <linux/mutex.h>
+ #include <linux/kfifo.h>
+ #include <linux/delay.h>
+-#include <linux/log2.h>
+ #include <asm/unaligned.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -38,211 +38,92 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-#define SNA32_CHECK 2147483648UL
+-
+-static int iscsi_sna_lt(u32 n1, u32 n2)
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
+ {
+- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
+ }
++EXPORT_SYMBOL_GPL(class_to_transport_session);
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-static int iscsi_sna_lte(u32 n1, u32 n2)
+-{
+- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+-}
++#define INVALID_SN_DELTA 0xffff
+
+-void
+-iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
++int
++iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ {
+ uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
+ uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
+
+- /*
+- * standard specifies this check for when to update expected and
+- * max sequence numbers
+- */
+- if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+- return;
+-
+- if (exp_cmdsn != session->exp_cmdsn &&
+- !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
++ if (max_cmdsn < exp_cmdsn -1 &&
++ max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
++ return ISCSI_ERR_MAX_CMDSN;
++ if (max_cmdsn > session->max_cmdsn ||
++ max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
++ session->max_cmdsn = max_cmdsn;
++ if (exp_cmdsn > session->exp_cmdsn ||
++ exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ session->exp_cmdsn = exp_cmdsn;
+
+- if (max_cmdsn != session->max_cmdsn &&
+- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+- session->max_cmdsn = max_cmdsn;
+- /*
+- * if the window closed with IO queued, then kick the
+- * xmit thread
+- */
+- if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
+- }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
++EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
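++
The backported iscsi_check_assign_cmdsn() above drops the mainline RFC 1982 serial-number helpers and instead accepts a new ExpCmdSN/MaxCmdSN when it is either numerically larger than the cached value or smaller by more than INVALID_SN_DELTA, which is taken to mean the 32-bit counter wrapped. A standalone sketch of that acceptance rule follows; the function name is invented for illustration, and like the patch it relies on unsigned wrap-around, so it assumes the counters stay well away from zero.

/*
 * Illustration only: the "advance or wrapped" test used for ExpCmdSN /
 * MaxCmdSN in the backport.  A candidate replaces the current value if it
 * is greater, or if it is smaller by more than INVALID_SN_DELTA (i.e. the
 * 32-bit counter wrapped past 2^32).  All arithmetic is unsigned, as in
 * the patched code.
 */
#include <stdint.h>
#include <stdio.h>

#define INVALID_SN_DELTA 0xffff

static int sn_should_advance(uint32_t cur, uint32_t candidate)
{
	return candidate > cur || candidate < cur - INVALID_SN_DELTA;
}

int main(void)
{
	/* normal advance */
	printf("%d\n", sn_should_advance(0x10000000u, 0x10000001u)); /* 1 */
	/* stale, slightly behind: rejected */
	printf("%d\n", sn_should_advance(0x10000000u, 0x0fffffffu)); /* 0 */
	/* wrapped past 2^32: numerically far behind, still accepted */
	printf("%d\n", sn_should_advance(0xfffffff0u, 5));           /* 1 */
	return 0;
}
++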
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
+-{
+- unsigned exp_len = task->hdr_len + len;
+-
+- if (exp_len > task->hdr_max) {
+- WARN_ON(1);
+- return -EINVAL;
+- }
+-
+- WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
+- return 0;
+-}
+-
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
+- int rc;
+-
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
+- if (rc)
+- return rc;
+- hdr->opcode = ISCSI_OP_SCSI_CMD;
+- hdr->flags = ISCSI_ATTR_SIMPLE;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
+- hdr->cmdsn = cpu_to_be32(session->cmdsn);
+- session->cmdsn++;
+- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
+-
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ hdr->opcode = ISCSI_OP_SCSI_CMD;
++ hdr->flags = ISCSI_ATTR_SIMPLE;
++ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
++ hdr->itt = build_itt(ctask->itt, conn->id, session->age);
++ hdr->data_length = cpu_to_be32(sc->request_bufflen);
++ hdr->cmdsn = cpu_to_be32(session->cmdsn);
++ session->cmdsn++;
++ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
++
++ ctask->data_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,318 +139,117 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->imm_count = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (ctask->total_length >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(ctask->total_length,
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(ctask->hdr->dlength, ctask->imm_count);
+ } else
+- zero_data(hdr->dlength);
++ zero_data(ctask->hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min(session->first_burst,
++ ctask->total_length) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
++ ctask->datasn = 0;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+- /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
+-
+- WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+- hdrlength /= ISCSI_PAD_LEN;
+-
+- WARN_ON(hdrlength >= 256);
+- hdr->hlength = hdrlength & 0xFF;
+-
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
+-
+ conn->scsicmd_pdus_cnt++;
+- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+- return 0;
+ }
++EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
++ struct iscsi_session *session = ctask->conn->session;
++ struct scsi_cmnd *sc = ctask->sc;
+
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+-
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+
+-void __iscsi_get_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- atomic_inc(&task->refcount);
++ atomic_inc(&ctask->refcount);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_get_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+-/*
+- * session lock must be held
+- */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
+- int err)
++static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct scsi_cmnd *sc;
+-
+- sc = task->sc;
+- if (!sc)
+- return;
+-
+- if (task->state == ISCSI_TASK_PENDING)
+- /*
+- * cmd never made it to the xmit thread, so we should not count
+- * the cmd in the sequencing
+- */
+- conn->session->queued_cmdsn--;
+- else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
+-
+- sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_put_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
+-
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
+-}
+-
+-static struct iscsi_task *
+-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
+-
+- if (session->state == ISCSI_STATE_TERMINATE)
+- return NULL;
+-
+- if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
+- hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- /*
+- * Login and Text are sent serially, in
+- * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
+- */
+- task = conn->login_task;
+- else {
+- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+-
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
+- return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+- }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+-
+- if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
+- } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
+-
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
+-}
+-
+-int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_session *session = conn->session;
+- int err = 0;
+-
+- spin_lock_bh(&session->lock);
+- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+- err = -EPERM;
+- spin_unlock_bh(&session->lock);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+-
+ /**
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+ * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
+ * then completes the command and task.
+ **/
+-static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
+- int datalen)
++static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ struct iscsi_cmd_task *ctask, char *data,
++ int datalen)
+ {
++ int rc;
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc) {
++ sc->result = DID_ERROR << 16;
++ goto out;
++ }
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+@@ -584,14 +264,13 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+
+ if (datalen < 2) {
+ invalid_datalen:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Got CHECK_CONDITION but invalid data "
+- "buffer size of %d\n", datalen);
++ printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
++ "invalid data buffer size of %d\n", datalen);
+ sc->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,36 +280,28 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ if (sc->sc_data_direction == DMA_TO_DEVICE)
++ goto out;
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+- ISCSI_FLAG_CMD_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+- if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+- scsi_set_resid(sc, res_count);
++ if (res_count > 0 && res_count <= sc->request_bufflen)
++ sc->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++ else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
++ return rc;
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -640,42 +311,18 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+- if (conn->tmf_state != TMF_QUEUED)
++ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+- conn->tmf_state = TMF_SUCCESS;
++ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+- conn->tmf_state = TMF_NOT_FOUND;
++ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+- conn->tmf_state = TMF_FAILED;
++ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+ }
+
+-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+-{
+- struct iscsi_nopout hdr;
+- struct iscsi_task *task;
+-
+- if (!rhdr && conn->ping_task)
+- return;
+-
+- memset(&hdr, 0, sizeof(struct iscsi_nopout));
+- hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+- hdr.flags = ISCSI_FLAG_CMD_FINAL;
+-
+- if (rhdr) {
+- memcpy(hdr.lun, rhdr->lun, 8);
+- hdr.ttt = rhdr->ttt;
+- hdr.itt = RESERVED_ITT;
+- } else
+- hdr.ttt = RESERVED_ITT;
+-
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
+- iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+-}
+-
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+ {
+@@ -692,41 +339,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+ memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+ itt = get_itt(rejected_pdu.itt);
+- iscsi_conn_printk(KERN_ERR, conn,
+- "itt 0x%x had pdu (op 0x%x) rejected "
+- "due to DataDigest error.\n", itt,
+- rejected_pdu.opcode);
++ printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
++ "due to DataDigest error.\n", itt,
++ rejected_pdu.opcode);
+ }
+ }
+ return 0;
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -742,24 +363,105 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+- conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
++
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
+
+ switch(opcode) {
+ case ISCSI_OP_NOOP_IN:
+@@ -771,7 +473,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ break;
+
+- iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
++ rc = ISCSI_ERR_CONN_FAILED;
+ break;
+ case ISCSI_OP_REJECT:
+ rc = iscsi_handle_reject(conn, hdr, data, datalen);
+@@ -785,101 +488,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
++done:
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
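
With the lookup helpers removed, the restored __iscsi_complete_pdu() demultiplexes purely on where the decoded ITT index lands: below cmds_max it names a slot in session->cmds[], inside a fixed window starting at ISCSI_MGMT_ITT_OFFSET it names a slot in session->mgmt_cmds[], and ~0U marks unsolicited PDUs (nop-in, async event, reject); anything else is ISCSI_ERR_BAD_ITT. A minimal user-space model of that classification, with 0xa00 assumed as the management offset:

#include <stdio.h>
#include <stdint.h>

#define EX_MGMT_ITT_OFFSET 0xa00u       /* assumed value of ISCSI_MGMT_ITT_OFFSET */

enum ex_itt_class { EX_ITT_SCSI, EX_ITT_MGMT, EX_ITT_UNSOLICITED, EX_ITT_BAD };

static enum ex_itt_class ex_classify_itt(uint32_t itt, uint32_t cmds_max,
                                         uint32_t mgmtpool_max)
{
        if (itt == ~0u)                 /* RESERVED_ITT: nop-in, async, reject */
                return EX_ITT_UNSOLICITED;
        if (itt < cmds_max)             /* index into session->cmds[] */
                return EX_ITT_SCSI;
        if (itt >= EX_MGMT_ITT_OFFSET &&
            itt < EX_MGMT_ITT_OFFSET + mgmtpool_max)
                return EX_ITT_MGMT;     /* index into session->mgmt_cmds[] */
        return EX_ITT_BAD;              /* maps to ISCSI_ERR_BAD_ITT */
}

int main(void)
{
        printf("%d %d %d %d\n",
               ex_classify_itt(5, 128, 32),
               ex_classify_itt(0xa01, 128, 32),
               ex_classify_itt(~0u, 128, 32),
               ex_classify_itt(0x5000, 128, 32));
        return 0;
}
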
+@@ -896,63 +508,55 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x expected "
++ "session age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
++ (conn->id << ISCSI_CID_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x, expected "
++ "CID (%x)\n", (__force u32)hdr->itt, conn->id);
++ return ISCSI_ERR_BAD_ITT;
++ }
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ printk(KERN_INFO "iscsi: dropping ctask with "
++ "itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ printk(KERN_ERR "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
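
The reinstated iscsi_verify_itt() checks the session-age and connection-id fields that the send path packs into the wire ITT with build_itt() (see iscsi_conn_send_generic() further down). A compilable pack/verify round trip is sketched below; the shifts and field widths are assumptions chosen for illustration, while the real ISCSI_AGE and ISCSI_CID masks live in the kernel's iscsi headers.

#include <assert.h>
#include <stdint.h>

/* assumed field layout: low 16 bits task index, 12 bits CID, top 4 bits age */
#define EX_AGE_SHIFT 28
#define EX_AGE_MASK  (0xfu << EX_AGE_SHIFT)
#define EX_CID_SHIFT 16
#define EX_CID_MASK  (0xfffu << EX_CID_SHIFT)
#define EX_ITT_MASK  0xffffu

static uint32_t ex_build_itt(uint32_t itt, uint32_t cid, uint32_t age)
{
        return (itt & EX_ITT_MASK) |
               ((cid << EX_CID_SHIFT) & EX_CID_MASK) |
               ((age << EX_AGE_SHIFT) & EX_AGE_MASK);
}

/* 0 when both the session age and the connection id in the ITT match */
static int ex_verify_itt(uint32_t wire_itt, uint32_t cid, uint32_t age)
{
        if ((wire_itt & EX_AGE_MASK) != (age << EX_AGE_SHIFT))
                return -1;      /* response belongs to an older session age */
        if ((wire_itt & EX_CID_MASK) != (cid << EX_CID_SHIFT))
                return -1;      /* response arrived on the wrong connection */
        return 0;
}

int main(void)
{
        uint32_t itt = ex_build_itt(42, 3, 7);

        assert(ex_verify_itt(itt, 3, 7) == 0);
        assert(ex_verify_itt(itt, 3, 8) != 0);
        return 0;
}
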
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,55 +578,29 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
+-static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc, was_logout = 0;
+
+- /*
+- * Check for iSCSI window and take care of CmdSN wrap-around
+- */
+- if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
+- debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
+- "CmdSN %u/%u\n", session->exp_cmdsn,
+- session->max_cmdsn, session->cmdsn,
+- session->queued_cmdsn);
+- return -ENOSPC;
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
++ conn->session->state = ISCSI_STATE_IN_RECOVERY;
++ iscsi_block_session(session_to_cls(conn->session));
++ was_logout = 1;
+ }
+- return 0;
+-}
+-
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
+-{
+- struct iscsi_task *task = conn->task;
+- int rc;
+-
+- __iscsi_get_task(task);
+- spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
+- spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
+- if (!rc)
+- /* done with this task */
+- conn->task = NULL;
+- return rc;
+-}
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ if (rc)
++ return rc;
+
+-/**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
+- *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
+- */
+-void iscsi_requeue_task(struct iscsi_task *task)
+-{
+- struct iscsi_conn *conn = task->conn;
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
+
+- list_move_tail(&task->running, &conn->requeue);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ if (was_logout) {
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ return -ENODATA;
++ }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1035,94 +613,106 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+ **/
+ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ {
++ struct iscsi_transport *tt;
+ int rc = 0;
+
+- spin_lock_bh(&conn->session->lock);
+ if (unlikely(conn->suspend_tx)) {
+ debug_scsi("conn %d Tx suspended!\n", conn->id);
+- spin_unlock_bh(&conn->session->lock);
+ return -ENODATA;
+ }
+-
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
++ tt = conn->session->tt;
+
+ /*
+- * process mgmt pdus like nops before commands since we should
+- * only have one nop-out as a ping from us and targets should not
+- * overflow us with nop-ins
++ * Transmit in the following order:
++ *
++ * 1) un-finished xmit (ctask or mtask)
++ * 2) immediate control PDUs
++ * 3) write data
++ * 4) SCSI commands
++ * 5) non-immediate control PDUs
++ *
++ * No need to lock around __kfifo_get as long as
++ * there's one producer and one consumer.
+ */
+-check_mgmt:
+- while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
+
+- /* process pending command queue */
+- while (!list_empty(&conn->xmitqueue)) {
+- if (conn->tmf_state == TMF_QUEUED)
+- break;
++ BUG_ON(conn->ctask && conn->mtask);
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+- continue;
+- }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ iscsi_get_ctask(conn->ctask);
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++ iscsi_put_ctask(conn->ctask);
+ if (rc)
+ goto again;
+- /*
+- * we could continuously get new task requests so
+- * we need to check the mgmt queue for nops that need to
+- * be sent to aviod starvation
+- */
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ /* done with this in-progress ctask */
++ conn->ctask = NULL;
++ }
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
+ }
+
+- while (!list_empty(&conn->requeue)) {
+- if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
+- break;
++ /* process immediate first */
++ if (unlikely(__kfifo_len(conn->immqueue))) {
++ while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
+
++ /* process command queue */
++ spin_lock_bh(&conn->session->lock);
++ while (!list_empty(&conn->xmitqueue)) {
+ /*
+- * we always do fastlogout - conn stop code will clean up.
++ * iscsi tcp may readd the task to the xmitqueue to send
++ * write data
+ */
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- break;
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ __iscsi_get_ctask(conn->ctask);
++ spin_unlock_bh(&conn->session->lock);
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
+- if (rc)
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++
++ spin_lock_bh(&conn->session->lock);
++ __iscsi_put_ctask(conn->ctask);
++ if (rc) {
++ spin_unlock_bh(&conn->session->lock);
+ goto again;
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ }
+ }
+ spin_unlock_bh(&conn->session->lock);
++ /* done with this ctask */
++ conn->ctask = NULL;
++
++ /* process the rest control plane PDUs, if any */
++ if (unlikely(__kfifo_len(conn->mgmtqueue))) {
++ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
++
+ return -ENODATA;
+
+ again:
+ if (unlikely(conn->suspend_tx))
+- rc = -ENODATA;
+- spin_unlock_bh(&conn->session->lock);
++ return -ENODATA;
++
+ return rc;
+ }
+
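
The restored iscsi_data_xmit() drains the connection in a fixed order (unfinished task, immediate control PDUs, SCSI commands, then the remaining control PDUs) and, as its comment notes, relies on __kfifo_get/__kfifo_put being safe without a lock when there is exactly one producer and one consumer. A toy single-producer/single-consumer ring shows the indexing discipline behind that claim; the real kfifo additionally inserts the memory barriers this model leaves out.

#include <stdio.h>

#define RING_SZ 8                       /* power of two */

struct spsc_ring {
        void *slot[RING_SZ];
        unsigned int head;              /* written by the producer only */
        unsigned int tail;              /* written by the consumer only */
};

static int ring_put(struct spsc_ring *r, void *p)
{
        if (r->head - r->tail == RING_SZ)
                return 0;               /* full */
        r->slot[r->head++ & (RING_SZ - 1)] = p;
        return 1;
}

static int ring_get(struct spsc_ring *r, void **p)
{
        if (r->head == r->tail)
                return 0;               /* empty */
        *p = r->slot[r->tail++ & (RING_SZ - 1)];
        return 1;
}

int main(void)
{
        struct spsc_ring r = { .head = 0, .tail = 0 };
        int a = 1, b = 2;
        void *out;

        ring_put(&r, &a);
        ring_put(&r, &b);
        while (ring_get(&r, &out))
                printf("%d\n", *(int *)out);
        return 0;
}
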
+@@ -1134,9 +724,11 @@ static void iscsi_xmitworker(struct work_struct *work)
+ /*
+ * serialize Xmit worker on a per-connection basis.
+ */
++ mutex_lock(&conn->xmitmutex);
+ do {
+ rc = iscsi_data_xmit(conn);
+ } while (rc >= 0 || rc == -EAGAIN);
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ enum {
+@@ -1148,36 +740,25 @@ enum {
+ FAILURE_SESSION_TERMINATE,
+ FAILURE_SESSION_IN_RECOVERY,
+ FAILURE_SESSION_RECOVERY_TIMEOUT,
+- FAILURE_SESSION_LOGGING_OUT,
+- FAILURE_SESSION_NOT_READY,
+ };
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+ sc->SCp.ptr = NULL;
+
+ host = sc->device->host;
+- spin_unlock(host->host_lock);
++ session = iscsi_hostdata(host->hostdata);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
+- if (reason) {
+- sc->result = reason;
+- goto fault;
+- }
+-
+ /*
+ * ISCSI_STATE_FAILED is a temp. state. The recovery
+ * code will decide what is best to do with command queued
+@@ -1191,95 +772,77 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ * be entering our queuecommand while a block is starting
+ * up because the block code is not locked)
+ */
+- switch (session->state) {
+- case ISCSI_STATE_IN_RECOVERY:
++ if (session->state == ISCSI_STATE_IN_RECOVERY) {
+ reason = FAILURE_SESSION_IN_RECOVERY;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_LOGGING_OUT:
+- reason = FAILURE_SESSION_LOGGING_OUT;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_RECOVERY_FAILED:
++ goto reject;
++ }
++
++ if (session->state == ISCSI_STATE_RECOVERY_FAILED)
+ reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- case ISCSI_STATE_TERMINATE:
++ else if (session->state == ISCSI_STATE_TERMINATE)
+ reason = FAILURE_SESSION_TERMINATE;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- default:
++ else
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+- }
+ goto fault;
+ }
+
++ /*
++ * Check for iSCSI window and take care of CmdSN wrap-around
++ */
++ if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
++ reason = FAILURE_WINDOW_CLOSED;
++ goto reject;
++ }
++
+ conn = session->leadconn;
+ if (!conn) {
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+ goto fault;
+ }
+
+- if (iscsi_check_cmdsn_window_closed(conn)) {
+- reason = FAILURE_WINDOW_CLOSED;
+- goto reject;
+- }
+-
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
+-
+- session->queued_cmdsn++;
++ sc->SCp.ptr = (char *)ctask;
++
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->mtask = NULL;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++ ctask->total_length = sc->request_bufflen;
++ iscsi_prep_scsi_cmd_pdu(ctask);
++
++ session->tt->init_cmd_task(ctask);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
++ debug_scsi(
++ "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
++ "win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+- spin_lock(host->host_lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ return 0;
+
+ reject:
+ spin_unlock(&session->lock);
+ debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
+- spin_lock(host->host_lock);
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ fault:
+ spin_unlock(&session->lock);
+- debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
+- spin_lock(host->host_lock);
++ printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
++ sc->cmnd[0], reason);
++ sc->result = (DID_NO_CONNECT << 16);
++ sc->resid = sc->request_bufflen;
++ sc->scsi_done(sc);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_queuecommand);
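
The window test reinstated above, (int)(session->max_cmdsn - session->cmdsn) < 0, is serial-number arithmetic: subtract the two 32-bit counters and look at the sign, which keeps giving the right answer across CmdSN wrap-around. A short demonstration:

#include <stdio.h>
#include <stdint.h>

static int window_closed(uint32_t cmdsn, uint32_t max_cmdsn)
{
        return (int32_t)(max_cmdsn - cmdsn) < 0;
}

int main(void)
{
        /* plain case: cmdsn 10, max 15, window open */
        printf("%d\n", window_closed(10, 15));
        /* wrapped case: cmdsn just below UINT32_MAX, max already wrapped; still open */
        printf("%d\n", window_closed(0xfffffffeu, 0x00000003u));
        /* genuinely closed: max_cmdsn fell behind cmdsn */
        printf("%d\n", window_closed(20, 15));
        return 0;
}
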
+@@ -1293,15 +856,106 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
++static int
++iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++ struct iscsi_mgmt_task *mtask;
++
++ spin_lock_bh(&session->lock);
++ if (session->state == ISCSI_STATE_TERMINATE) {
++ spin_unlock_bh(&session->lock);
++ return -EPERM;
++ }
++ if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
++ hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ /*
++ * Login and Text are sent serially, in
++ * request-followed-by-response sequence.
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
++ */
++ mtask = conn->login_mtask;
++ else {
++ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
++ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
++
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*))) {
++ spin_unlock_bh(&session->lock);
++ return -ENOSPC;
++ }
++ }
++
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
++ session->cmdsn++;
++ } else
++ /* do not advance CmdSN */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++
++ if (data_size) {
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
++ } else
++ mtask->data_count = 0;
++
++ INIT_LIST_HEAD(&mtask->running);
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask, data, data_size);
++ spin_unlock_bh(&session->lock);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode, hdr->itt, data_size);
++
++ /*
++ * since send_pdu() could be called at least from two contexts,
++ * we need to serialize __kfifo_put, so we don't have to take
++ * additional lock on fast data-path
++ */
++ if (hdr->opcode & ISCSI_OP_IMMEDIATE)
++ __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
++ else
++ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
++
++ scsi_queue_work(session->host, &conn->xmitwork);
++ return 0;
++}
++
++int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_conn *conn = cls_conn->dd_data;
++ int rc;
++
++ mutex_lock(&conn->xmitmutex);
++ rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
++ mutex_unlock(&conn->xmitmutex);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
++
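
iscsi_conn_send_generic() pre-formats the CmdSN so that only non-immediate PDUs sent on a started connection consume a sequence number; immediate PDUs, and PDUs carrying RESERVED_ITT, are stamped with the current CmdSN but do not advance it. A reduced model of that bookkeeping (the ex_* names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ex_session { uint32_t cmdsn; bool started; };

static uint32_t ex_format_cmdsn(struct ex_session *s, bool immediate,
                                bool reserved_itt)
{
        uint32_t pdu_cmdsn = s->cmdsn;          /* value stamped into the PDU */

        if (!reserved_itt && s->started && !immediate)
                s->cmdsn++;                     /* only these PDUs consume a slot */
        return pdu_cmdsn;
}

int main(void)
{
        struct ex_session s = { .cmdsn = 100, .started = true };

        assert(ex_format_cmdsn(&s, true, false)  == 100);  /* immediate nop-out */
        assert(s.cmdsn == 100);
        assert(ex_format_cmdsn(&s, false, false) == 100);  /* ordinary mgmt PDU */
        assert(s.cmdsn == 101);
        assert(ex_format_cmdsn(&s, false, true)  == 101);  /* RESERVED_ITT PDU */
        assert(s.cmdsn == 101);
        return 0;
}
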
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
++ struct iscsi_conn *conn = session->leadconn;
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ session->state = ISCSI_STATE_RECOVERY_FAILED;
+- if (session->leadconn)
+- wake_up(&session->leadconn->ehwait);
++ if (conn)
++ wake_up(&conn->ehwait);
+ }
+ spin_unlock_bh(&session->lock);
+ }
+@@ -1309,32 +963,33 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
++ int fail_session = 0;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
++ if (sc->SCp.phase == session->age) {
++ debug_scsi("failing connection CID %d due to SCSI host reset\n",
++ conn->id);
++ fail_session = 1;
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ /*
+ * we drop the lock here but the leadconn cannot be destoyed while
+ * we are in the scsi eh
+ */
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ if (fail_session)
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+ debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ wait_event_interruptible(conn->ehwait,
+@@ -1344,717 +999,472 @@ failed:
+ if (signal_pending(current))
+ flush_signals(current);
+
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+- iscsi_session_printk(KERN_INFO, session,
+- "host reset succeeded\n");
++ printk(KERN_INFO "iscsi: host reset succeeded\n");
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ return SUCCESS;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+
+-static void iscsi_tmf_timedout(unsigned long data)
++static void iscsi_tmabort_timedout(unsigned long data)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
++ struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&session->lock);
+- if (conn->tmf_state == TMF_QUEUED) {
+- conn->tmf_state = TMF_TIMEDOUT;
+- debug_scsi("tmf timedout\n");
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmabort_state = TMABORT_TIMEDOUT;
++ debug_scsi("tmabort timedout [sc %p itt 0x%x]\n",
++ ctask->sc, ctask->itt);
+ /* unblock eh_abort() */
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock(&session->lock);
+ }
+
+-static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+- struct iscsi_tm *hdr, int age,
+- int timeout)
++/* must be called with the mutex lock */
++static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
++ struct iscsi_cmd_task *ctask)
+ {
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_tm *hdr = &conn->tmhdr;
++ int rc;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+- NULL, 0);
+- if (!task) {
+- spin_unlock_bh(&session->lock);
++ /*
++ * ctask timed out but the session is OK; abort requests must be serialized.
++ */
++ memset(hdr, 0, sizeof(struct iscsi_tm));
++ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
++ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
++ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
++
++ rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
++ NULL, 0);
++ if (rc) {
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- spin_lock_bh(&session->lock);
+- debug_scsi("tmf exec failure\n");
+- return -EPERM;
++ debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
++ rc);
++ return rc;
+ }
+- conn->tmfcmd_pdus_cnt++;
+- conn->tmf_timer.expires = timeout * HZ + jiffies;
+- conn->tmf_timer.function = iscsi_tmf_timedout;
+- conn->tmf_timer.data = (unsigned long)conn;
+- add_timer(&conn->tmf_timer);
+- debug_scsi("tmf set timeout\n");
+
++ debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
++
++ spin_lock_bh(&session->lock);
++ ctask->mtask = (struct iscsi_mgmt_task *)
++ session->mgmt_cmds[get_itt(hdr->itt) -
++ ISCSI_MGMT_ITT_OFFSET];
++
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmfcmd_pdus_cnt++;
++ conn->tmabort_timer.expires = 10*HZ + jiffies;
++ conn->tmabort_timer.function = iscsi_tmabort_timedout;
++ conn->tmabort_timer.data = (unsigned long)ctask;
++ add_timer(&conn->tmabort_timer);
++ debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++ mutex_unlock(&conn->xmitmutex);
+
+ /*
+ * block eh thread until:
+ *
+- * 1) tmf response
+- * 2) tmf timeout
++ * 1) abort response
++ * 2) abort timeout
+ * 3) session is terminated or restarted or userspace has
+ * given up on recovery
+ */
+- wait_event_interruptible(conn->ehwait, age != session->age ||
++ wait_event_interruptible(conn->ehwait,
++ sc->SCp.phase != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN ||
+- conn->tmf_state != TMF_QUEUED);
++ conn->tmabort_state != TMABORT_INITIAL);
+ if (signal_pending(current))
+ flush_signals(current);
+- del_timer_sync(&conn->tmf_timer);
++ del_timer_sync(&conn->tmabort_timer);
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
+- if (age != session->age ||
+- session->state != ISCSI_STATE_LOGGED_IN)
+- return -ENOTCONN;
++ mutex_lock(&conn->xmitmutex);
+ return 0;
+ }
+
+ /*
+- * Fail commands. session lock held and recv side suspended and xmit
+- * thread flushed
++ * xmit mutex and session lock must be held
+ */
+-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+- int error)
++static struct iscsi_mgmt_task *
++iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+ {
+- struct iscsi_task *task, *tmp;
++ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
++ struct iscsi_mgmt_task *task;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ debug_scsi("searching %d tasks\n", nr_tasks);
+
+- /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
+- }
+- }
++ for (i = 0; i < nr_tasks; i++) {
++ __kfifo_get(fifo, (void*)&task, sizeof(void*));
++ debug_scsi("check task %u\n", task->itt);
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ if (task->itt == itt) {
++ debug_scsi("matched task\n");
++ return task;
+ }
+- }
+
+- /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
+- }
++ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
++ return NULL;
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
+-{
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+-
+-static void iscsi_start_tx(struct iscsi_conn *conn)
+-{
+- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-}
+-
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+-
+- cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("scsi cmd %p timedout\n", scmd);
+-
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN) {
+- /*
+- * We are probably in the middle of iscsi recovery so let
+- * that complete and handle the error.
+- */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_session *session = conn->session;
+
+- conn = session->leadconn;
+- if (!conn) {
+- /* In the middle of shuting down */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ if (!ctask->mtask)
++ return -EINVAL;
+
+- if (!conn->recv_timeout && !conn->ping_timeout)
+- goto done;
+- /*
+- * if the ping timedout then we are in the middle of cleaning up
+- * and can let the iscsi eh handle it
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+- (conn->ping_timeout * HZ), jiffies))
+- rc = EH_RESET_TIMER;
+- /*
+- * if we are about to check the transport then give the command
+- * more time
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+- jiffies))
+- rc = EH_RESET_TIMER;
+- /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
+- rc = EH_RESET_TIMER;
+-done:
+- spin_unlock(&session->lock);
+- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+- return rc;
++ if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
++ list_del(&ctask->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
++ sizeof(void*));
++ ctask->mtask = NULL;
++ return 0;
+ }
+
+-static void iscsi_check_transport_timeouts(unsigned long data)
++/*
++ * session lock and xmitmutex must be held
++ */
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ int err)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+- struct iscsi_session *session = conn->session;
+- unsigned long recv_timeout, next_timeout = 0, last_recv;
++ struct scsi_cmnd *sc;
+
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN)
+- goto done;
+-
+- recv_timeout = conn->recv_timeout;
+- if (!recv_timeout)
+- goto done;
+-
+- recv_timeout *= HZ;
+- last_recv = conn->last_recv;
+- if (conn->ping_task &&
+- time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+- jiffies)) {
+- iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+- "expired, last rx %lu, last ping %lu, "
+- "now %lu\n", conn->ping_timeout, last_recv,
+- conn->last_ping, jiffies);
+- spin_unlock(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ sc = ctask->sc;
++ if (!sc)
+ return;
+- }
+
+- if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+- /* send a ping to try to provoke some traffic */
+- debug_scsi("Sending nopout as ping on conn %p\n", conn);
+- iscsi_send_nopout(conn, NULL);
+- next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+- } else
+- next_timeout = last_recv + recv_timeout;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
++ iscsi_ctask_mtask_cleanup(ctask);
+
+- debug_scsi("Setting next tmo %lu\n", next_timeout);
+- mod_timer(&conn->transport_timer, next_timeout);
+-done:
+- spin_unlock(&session->lock);
+-}
+-
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+- struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ sc->result = err;
++ sc->resid = sc->request_bufflen;
++ /* release ref from queuecommand */
++ __iscsi_put_ctask(ctask);
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
+- struct iscsi_tm *hdr;
+- int rc, age;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ struct iscsi_session *session;
++ int rc;
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
+
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ conn = ctask->conn;
++ session = conn->session;
++
++ conn->eh_abort_cnt++;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
++
++ mutex_lock(&conn->xmitmutex);
++ spin_lock_bh(&session->lock);
++
+ /*
+ * If we are not logged in or we have started a new session
+ * then let the host reset code handle this
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+- sc->SCp.phase != session->age) {
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+- }
+-
+- conn = session->leadconn;
+- conn->eh_abort_cnt++;
+- age = session->age;
+-
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ if (session->state != ISCSI_STATE_LOGGED_IN ||
++ sc->SCp.phase != session->age)
++ goto failed;
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
+- goto success;
++ /* what should we do here ? */
++ if (conn->ctask == ctask) {
++ printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
++ "Failing abort\n", sc, ctask->itt);
++ goto failed;
+ }
+
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto failed;
+- conn->tmf_state = TMF_QUEUED;
++ if (ctask->state == ISCSI_TASK_PENDING)
++ goto success_cleanup;
+
+- hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ conn->tmabort_state = TMABORT_INITIAL;
+
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+- rc = FAILED;
+- goto failed;
+- }
++ spin_unlock_bh(&session->lock);
++ rc = iscsi_exec_abort_task(sc, ctask);
++ spin_lock_bh(&session->lock);
+
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+- iscsi_suspend_tx(conn);
+- /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
+- */
+- spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
+- conn->tmf_state = TMF_INITIAL;
+- spin_unlock(&session->lock);
+- iscsi_start_tx(conn);
+- goto success_unlocked;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto failed_unlocked;
+- case TMF_NOT_FOUND:
+- if (!sc->SCp.ptr) {
+- conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ if (rc || sc->SCp.phase != session->age ||
++ session->state != ISCSI_STATE_LOGGED_IN)
++ goto failed;
++ iscsi_ctask_mtask_cleanup(ctask);
++
++ switch (conn->tmabort_state) {
++ case TMABORT_SUCCESS:
++ goto success_cleanup;
++ case TMABORT_NOT_FOUND:
++ if (!ctask->sc) {
++ /* ctask completed before tmf abort response */
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+- conn->tmf_state = TMF_INITIAL;
++ /* timedout or failed */
++ spin_unlock_bh(&session->lock);
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ spin_lock_bh(&session->lock);
+ goto failed;
+ }
+
+-success:
++success_cleanup:
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ spin_unlock_bh(&session->lock);
+-success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
+- mutex_unlock(&session->eh_mutex);
+- return SUCCESS;
+
+-failed:
+- spin_unlock_bh(&session->lock);
+-failed_unlocked:
+- debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+-
+-static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->rtt = RESERVED_ITT;
+-}
+-
+-int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+-{
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- struct iscsi_tm *hdr;
+- int rc = FAILED;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+-
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+- * Just check if we are not logged in. We cannot check for
+- * the phase because the reset could come from a ioctl.
++ * clean up task if aborted. we have the xmitmutex so grab
++ * the recv lock as a writer
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+- goto unlock;
+- conn = session->leadconn;
+-
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto unlock;
+- conn->tmf_state = TMF_QUEUED;
+-
+- hdr = &conn->tmhdr;
+- iscsi_prep_lun_reset_pdu(sc, hdr);
+-
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+- session->lu_reset_timeout)) {
+- rc = FAILED;
+- goto unlock;
+- }
+-
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- break;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto done;
+- default:
+- conn->tmf_state = TMF_INITIAL;
+- goto unlock;
+- }
+-
+- rc = SUCCESS;
+- spin_unlock_bh(&session->lock);
+-
+- iscsi_suspend_tx(conn);
+-
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_all_commands(conn, sc->device->lun, DID_ERROR);
+- conn->tmf_state = TMF_INITIAL;
++ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+- iscsi_start_tx(conn);
+- goto done;
++success_rel_mutex:
++ mutex_unlock(&conn->xmitmutex);
++ return SUCCESS;
+
+-unlock:
++failed:
+ spin_unlock_bh(&session->lock);
+-done:
+- debug_scsi("iscsi_eh_device_reset %s\n",
+- rc == SUCCESS ? "SUCCESS" : "FAILED");
+- mutex_unlock(&session->eh_mutex);
+- return rc;
++ mutex_unlock(&conn->xmitmutex);
++
++ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ return FAILED;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
++EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+
+-/*
+- * Pre-allocate a pool of @max items of @item_size. By default, the pool
+- * should be accessed via kfifo_{get,put} on q->queue.
+- * Optionally, the caller can obtain the array of object pointers
+- * by passing in a non-NULL @items pointer
+- */
+ int
+-iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
++iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
+ {
+- int i, num_arrays = 1;
++ int i;
+
+- memset(q, 0, sizeof(*q));
++ *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (*items == NULL)
++ return -ENOMEM;
+
+ q->max = max;
+-
+- /* If the user passed an items pointer, he wants a copy of
+- * the array. */
+- if (items)
+- num_arrays++;
+- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+- if (q->pool == NULL)
+- goto enomem;
++ q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (q->pool == NULL) {
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+ GFP_KERNEL, NULL);
+- if (q->queue == ERR_PTR(-ENOMEM))
+- goto enomem;
++ if (q->queue == ERR_PTR(-ENOMEM)) {
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ for (i = 0; i < max; i++) {
+- q->pool[i] = kzalloc(item_size, GFP_KERNEL);
++ q->pool[i] = kmalloc(item_size, GFP_KERNEL);
+ if (q->pool[i] == NULL) {
+- q->max = i;
+- goto enomem;
++ int j;
++
++ for (j = 0; j < i; j++)
++ kfree(q->pool[j]);
++
++ kfifo_free(q->queue);
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
+ }
++ memset(q->pool[i], 0, item_size);
++ (*items)[i] = q->pool[i];
+ __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ }
+-
+- if (items) {
+- *items = q->pool + max;
+- memcpy(*items, q->pool, max * sizeof(void *));
+- }
+-
+ return 0;
+-
+-enomem:
+- iscsi_pool_free(q);
+- return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_init);
+
+-void iscsi_pool_free(struct iscsi_pool *q)
++void iscsi_pool_free(struct iscsi_queue *q, void **items)
+ {
+ int i;
+
+ for (i = 0; i < q->max; i++)
+- kfree(q->pool[i]);
+- if (q->pool)
+- kfree(q->pool);
++ kfree(items[i]);
++ kfree(q->pool);
++ kfree(items);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
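
The reverted iscsi_pool_init()/iscsi_pool_free() pair allocates every task object up front and circulates the free ones as raw pointers through a kfifo, so the submission fast path never calls the allocator; the caller also receives an items[] array for indexed access and teardown. A user-space approximation of that scheme; the ex_* helpers are illustrative, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct ex_pool {
        void **items;           /* every object, for indexed access and teardown */
        void **fifo;            /* ring of currently free objects */
        int max;
        unsigned int head, tail;
};

static int ex_pool_init(struct ex_pool *q, int max, size_t item_size)
{
        q->items = calloc(max, sizeof(void *));
        q->fifo  = calloc(max, sizeof(void *));
        if (!q->items || !q->fifo)
                return -1;
        q->max = max;
        q->head = q->tail = 0;
        for (int i = 0; i < max; i++) {
                q->items[i] = calloc(1, item_size);
                if (!q->items[i])
                        return -1;              /* sketch: no partial cleanup */
                q->fifo[q->head++ % max] = q->items[i];
        }
        return 0;
}

static void *ex_pool_get(struct ex_pool *q)
{
        if (q->head == q->tail)
                return NULL;                    /* pool exhausted */
        return q->fifo[q->tail++ % q->max];
}

static void ex_pool_put(struct ex_pool *q, void *p)
{
        q->fifo[q->head++ % q->max] = p;
}

static void ex_pool_free(struct ex_pool *q)
{
        for (int i = 0; i < q->max; i++)
                free(q->items[i]);
        free(q->items);
        free(q->fifo);
}

int main(void)
{
        struct ex_pool pool;
        void *task;

        if (ex_pool_init(&pool, 4, 64))
                return 1;
        task = ex_pool_get(&pool);              /* queuecommand takes a task */
        printf("got %p\n", task);
        ex_pool_put(&pool, task);               /* completion returns it */
        ex_pool_free(&pool);
        return 0;
}
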
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
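
The diagram above describes a single hostdata buffer with two views: the leading unsigned long holds the pointer to the class session, and the iscsi_session structure follows it. iscsi_hostdata() is assumed to skip that leading word, mirroring the helper of the same name in the driver headers. A small model of the layout:

#include <assert.h>
#include <stdlib.h>

struct ex_session { int state; };

static void *ex_hostdata_session(void *hostdata)        /* first word */
{
        return (void *)*(unsigned long *)hostdata;
}

static struct ex_session *ex_iscsi_hostdata(void *hostdata)
{
        return (struct ex_session *)((char *)hostdata + sizeof(unsigned long));
}

int main(void)
{
        int cls_session_stub = 0;
        size_t sz = sizeof(unsigned long) + sizeof(struct ex_session);
        void *hostdata = calloc(1, sz);

        *(unsigned long *)hostdata = (unsigned long)&cls_session_stub;
        ex_iscsi_hostdata(hostdata)->state = 1;

        assert(ex_hostdata_session(hostdata) == (void *)&cls_session_stub);
        assert(ex_iscsi_hostdata(hostdata)->state == 1);
        free(hostdata);
        return 0;
}
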
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
+- }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
+- }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ *hostno = shost->host_no;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
+- return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+- session->fast_abort = 1;
+- session->lu_reset_timeout = 15;
+- session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
+- session->queued_cmdsn = session->cmdsn = initial_cmdsn;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = ISCSI_XMIT_CMDS_MAX;
++ session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+- mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
++
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
+
+- if (iscsi_add_session(cls_session, id))
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
+- iscsi_pool_free(&session->cmdpool);
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++mgmtpool_alloc_fail:
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
+- iscsi_pool_free(&session->cmdpool);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+
+- kfree(session->password);
+- kfree(session->password_in);
+- kfree(session->username);
+- kfree(session->username_in);
+ kfree(session->targetname);
+- kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+ iscsi_destroy_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,68 +1472,74 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ conn->id = conn_idx;
+ conn->exp_statsn = 0;
+- conn->tmf_state = TMF_INITIAL;
+-
+- init_timer(&conn->transport_timer);
+- conn->transport_timer.data = (unsigned long)conn;
+- conn->transport_timer.function = iscsi_check_transport_timeouts;
+-
++ conn->tmabort_state = TMABORT_INITIAL;
+ INIT_LIST_HEAD(&conn->run_list);
+ INIT_LIST_HEAD(&conn->mgmt_run_list);
+- INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->xmitqueue);
+- INIT_LIST_HEAD(&conn->requeue);
++
++ /* initialize general immediate & non-immediate PDU commands queue */
++ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->immqueue == ERR_PTR(-ENOMEM))
++ goto immqueue_alloc_fail;
++
++ conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
++ goto mgmtqueue_alloc_fail;
++
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+- init_timer(&conn->tmf_timer);
++ init_timer(&conn->tmabort_timer);
++ mutex_init(&conn->xmitmutex);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
++ kfifo_free(conn->mgmtqueue);
++mgmtqueue_alloc_fail:
++ kfifo_free(conn->immqueue);
++immqueue_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2142,7 +1558,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+ unsigned long flags;
+
+- del_timer_sync(&conn->transport_timer);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ mutex_lock(&conn->xmitmutex);
+
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+@@ -2155,6 +1572,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
++ mutex_unlock(&conn->xmitmutex);
++
+ /*
+ * Block until all in-progress commands for this connection
+ * time out or fail.
+@@ -2167,10 +1586,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ msleep_interruptible(500);
+- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+- "host_busy %d host_failed %d\n",
+- session->host->host_busy,
+- session->host->host_failed);
++ printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
++ "host_failed %d\n", session->host->host_busy,
++ session->host->host_failed);
+ /*
+ * force eh_abort() to unblock
+ */
+@@ -2178,17 +1596,23 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- iscsi_suspend_tx(conn);
++ scsi_flush_work(session->host);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+- if (session->leadconn == conn)
++ if (session->leadconn == conn) {
+ session->leadconn = NULL;
++ /* no connections exist, reset sequencing */
++ session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
++ }
+ spin_unlock_bh(&session->lock);
+
++ kfifo_free(conn->immqueue);
++ kfifo_free(conn->mgmtqueue);
++
+ iscsi_destroy_conn(cls_conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+@@ -2199,41 +1623,21 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+
+ if (!session) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "can't start unbound connection\n");
++ printk(KERN_ERR "iscsi: can't start unbound connection\n");
+ return -EPERM;
+ }
+
+ if ((session->imm_data_en || !session->initial_r2t_en) &&
+ session->first_burst > session->max_burst) {
+- iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+- "first_burst %d max_burst %d\n",
+- session->first_burst, session->max_burst);
++ printk("iscsi: invalid burst lengths: "
++ "first_burst %d max_burst %d\n",
++ session->first_burst, session->max_burst);
+ return -EINVAL;
+ }
+
+- if (conn->ping_timeout && !conn->recv_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+- "zero. Using 5 seconds\n.");
+- conn->recv_timeout = 5;
+- }
+-
+- if (conn->recv_timeout && !conn->ping_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+- "zero. Using 5 seconds.\n");
+- conn->ping_timeout = 5;
+- }
+-
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_STARTED;
+ session->state = ISCSI_STATE_LOGGED_IN;
+- session->queued_cmdsn = session->cmdsn;
+-
+- conn->last_recv = jiffies;
+- conn->last_ping = jiffies;
+- if (conn->recv_timeout && conn->ping_timeout)
+- mod_timer(&conn->transport_timer,
+- jiffies + (conn->recv_timeout * HZ));
+
+ switch(conn->stop_stage) {
+ case STOP_CONN_RECOVER:
+@@ -2242,11 +1646,13 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ * commands after successful recovery
+ */
+ conn->stop_stage = 0;
+- conn->tmf_state = TMF_INITIAL;
++ conn->tmabort_state = TMABORT_INITIAL;
+ session->age++;
+- if (session->age == 16)
+- session->age = 0;
+- break;
++ spin_unlock_bh(&session->lock);
++
++ iscsi_unblock_session(session_to_cls(session));
++ wake_up(&conn->ehwait);
++ return 0;
+ case STOP_CONN_TERM:
+ conn->stop_stage = 0;
+ break;
+@@ -2255,8 +1661,6 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
+- wake_up(&conn->ehwait);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+@@ -2264,23 +1668,52 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
++ __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
++ if (mtask == conn->login_mtask)
++ continue;
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ list_del(&mtask->running);
++
++ if (mtask == conn->login_mtask)
++ continue;
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
++ }
++
++ conn->mtask = NULL;
++}
++
++/* Fail commands. Mutex and session lock held and recv side suspended */
++static void fail_all_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_cmd_task *ctask, *tmp;
++
++ /* flush pending */
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
++ ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
++ }
++
++ /* fail all other running */
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ debug_scsi("failing in progress sc %p itt 0x%x\n",
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+
+- conn->task = NULL;
++ conn->ctask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2288,13 +1721,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ {
+ int old_stop_stage;
+
+- del_timer_sync(&conn->transport_timer);
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (conn->stop_stage == STOP_CONN_TERM) {
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return;
+ }
+
+@@ -2311,9 +1740,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
+ conn->c_stage = ISCSI_CONN_STOPPED;
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ spin_unlock_bh(&session->lock);
+
+- iscsi_suspend_tx(conn);
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
++ mutex_lock(&conn->xmitmutex);
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +1760,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2334,11 +1768,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ * flush queues.
+ */
+ spin_lock_bh(&session->lock);
+- fail_all_commands(conn, -1,
+- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
++ fail_all_commands(conn);
+ flush_control_queues(session, conn);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+@@ -2352,8 +1786,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ iscsi_start_session_recovery(session, conn, flag);
+ break;
+ default:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid stop flag %d\n", flag);
++ printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+@@ -2361,7 +1794,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2387,21 +1820,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ uint32_t value;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- sscanf(buf, "%d", &session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- sscanf(buf, "%d", &session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- sscanf(buf, "%d", &session->lu_reset_timeout);
+- break;
+- case ISCSI_PARAM_PING_TMO:
+- sscanf(buf, "%d", &conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- sscanf(buf, "%d", &conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ sscanf(buf, "%d", &conn->max_recv_dlength);
+ break;
+@@ -2449,30 +1867,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ case ISCSI_PARAM_EXP_STATSN:
+ sscanf(buf, "%u", &conn->exp_statsn);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- kfree(session->username);
+- session->username = kstrdup(buf, GFP_KERNEL);
+- if (!session->username)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- kfree(session->username_in);
+- session->username_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->username_in)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- kfree(session->password);
+- session->password = kstrdup(buf, GFP_KERNEL);
+- if (!session->password)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- kfree(session->password_in);
+- session->password_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->password_in)
+- return -ENOMEM;
+- break;
+ case ISCSI_PARAM_TARGET_NAME:
+ /* this should not change between logins */
+ if (session->targetname)
+@@ -2500,14 +1894,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,19 +1905,11 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- len = sprintf(buf, "%d\n", session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- len = sprintf(buf, "%d\n", session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+- break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ break;
+@@ -2562,27 +1940,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_TPGT:
+ len = sprintf(buf, "%d\n", session->tpgt);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- len = sprintf(buf, "%s\n", session->username);
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- len = sprintf(buf, "%s\n", session->username_in);
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- len = sprintf(buf, "%s\n", session->password);
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- len = sprintf(buf, "%s\n", session->password_in);
+- break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2598,12 +1955,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_PING_TMO:
+- len = sprintf(buf, "%u\n", conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- len = sprintf(buf, "%u\n", conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ break;
+@@ -2639,72 +1990,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+
+-int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+- int len;
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->netdev);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+-
+-int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf, int buflen)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+-
+ MODULE_AUTHOR("Mike Christie");
+ MODULE_DESCRIPTION("iSCSI library functions");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..caf1836 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,28 +30,26 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
+-#define ISCSI_CONN_ATTRS 13
+-#define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_SESSION_ATTRS 11
++#define ISCSI_CONN_ATTRS 11
++#define ISCSI_HOST_ATTRS 0
++#define ISCSI_TRANSPORT_VERSION "2.0-724"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+-static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ /*
+ * list of registered transports and lock that must
+@@ -64,12 +62,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +77,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,163 +115,22 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+-
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+-
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
+-
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
+- ihost->scan_workq = create_singlethread_workqueue(
+- ihost->scan_workq_name);
+- if (!ihost->scan_workq)
+- return -ENOMEM;
+- return 0;
+-}
+-
+-static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
+-{
+- struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- destroy_workqueue(ihost->scan_workq);
+ return 0;
+ }
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+ "iscsi_host",
+ iscsi_setup_host,
+- iscsi_remove_host,
++ NULL,
+ NULL);
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+@@ -340,54 +201,6 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
+ * The following functions can be used by LLDs that allocate
+ * their own scsi_hosts or by software iscsi LLDs
+ */
+-static struct {
+- int value;
+- char *name;
+-} iscsi_session_state_names[] = {
+- { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+- { ISCSI_SESSION_FAILED, "FAILED" },
+- { ISCSI_SESSION_FREE, "FREE" },
+-};
+-
+-static const char *iscsi_session_state_name(int state)
+-{
+- int i;
+- char *name = NULL;
+-
+- for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+- if (iscsi_session_state_names[i].value == state) {
+- name = iscsi_session_state_names[i].name;
+- break;
+- }
+- }
+- return name;
+-}
+-
+-int iscsi_session_chkready(struct iscsi_cls_session *session)
+-{
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_LOGGED_IN:
+- err = 0;
+- break;
+- case ISCSI_SESSION_FAILED:
+- err = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_SESSION_FREE:
+- err = DID_NO_CONNECT << 16;
+- break;
+- default:
+- err = DID_NO_CONNECT << 16;
+- break;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+-
+ static void iscsi_session_release(struct device *dev)
+ {
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
+@@ -403,114 +216,22 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+-/**
+- * iscsi_scan_finished - helper to report when running scans are done
+- * @shost: scsi host
+- * @time: scan run time
+- *
+- * This function can be used by drives like qla4xxx to report to the scsi
+- * layer when the scans it kicked off at module load time are done.
+- */
+-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+-{
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- /*
+- * qla4xxx will have kicked off some session unblocks before calling
+- * scsi_scan_host, so just wait for them to complete.
+- */
+- return !atomic_read(&ihost->nr_scans);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+-
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+-
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
+-}
+-
+-static void iscsi_scan_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session, scan_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
+-
+- iscsi_user_scan_session(&session->dev, &scan_data);
+- atomic_dec(&ihost->nr_scans);
++ return 0;
+ }
+
+ static void session_recovery_timedout(struct work_struct *work)
+@@ -518,24 +239,9 @@ static void session_recovery_timedout(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
+- unsigned long flags;
+-
+- iscsi_cls_session_printk(KERN_INFO, session,
+- "session recovery timed out after %d secs\n",
+- session->recovery_tmo);
+
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_FAILED:
+- session->state = ISCSI_SESSION_FREE;
+- break;
+- case ISCSI_SESSION_LOGGED_IN:
+- case ISCSI_SESSION_FREE:
+- /* we raced with the unblock's flush */
+- spin_unlock_irqrestore(&session->lock, flags);
+- return;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
++ dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
++ "out after %d secs\n", session->recovery_tmo);
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
+@@ -543,201 +249,73 @@ static void session_recovery_timedout(struct work_struct *work)
+ scsi_target_unblock(&session->dev);
+ }
+
+-static void __iscsi_unblock_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unblock_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /*
+- * The recovery and unblock work get run from the same workqueue,
+- * so try to cancel it if it was going to run after this unblock.
+- */
+- cancel_delayed_work(&session->recovery_work);
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_LOGGED_IN;
+- spin_unlock_irqrestore(&session->lock, flags);
+- /* start IO */
+- scsi_target_unblock(&session->dev);
+- /*
+- * Only do kernel scanning if the driver is properly hooked into
+- * the async scanning code (drivers like iscsi_tcp do login and
+- * scanning from userspace).
+- */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
+-}
+-
+-/**
+- * iscsi_unblock_session - set a session as logged in and start IO.
+- * @session: iscsi session
+- *
+- * Mark a session as ready to accept IO.
+- */
+ void iscsi_unblock_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+- /*
+- * make sure all the events have completed before tell the driver
+- * it is safe
+- */
+- flush_workqueue(iscsi_eh_timer_workq);
++ if (!cancel_delayed_work(&session->recovery_work))
++ flush_scheduled_work();
++ scsi_target_unblock(&session->dev);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
+
+-static void __iscsi_block_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- block_work);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FAILED;
+- spin_unlock_irqrestore(&session->lock, flags);
+- scsi_target_block(&session->dev);
+- queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+- session->recovery_tmo * HZ);
+-}
+-
+ void iscsi_block_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->block_work);
++ scsi_target_block(&session->dev);
++ schedule_delayed_work(&session->recovery_work,
++ session->recovery_tmo * HZ);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_block_session);
+
+-static void __iscsi_unbind_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unbind_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /* Prevent new scans and make sure scanning is not in progress */
+- mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return;
+- }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+-
+- scsi_remove_target(&session->dev);
+- iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+-}
+-
+-static int iscsi_unbind_session(struct iscsi_cls_session *session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- return queue_work(ihost->scan_workq, &session->unbind_work);
+-}
+-
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ session->transport = transport;
+ session->recovery_tmo = 120;
+- session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+- INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+- INIT_WORK(&session->block_work, __iscsi_block_session);
+- INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+- INIT_WORK(&session->scan_work, iscsi_scan_session);
+- spin_lock_init(&session->lock);
+
+ /* this is released in the dev's release function */
+ scsi_host_get(shost);
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id = target_id;
++ struct iscsi_host *ihost;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+ err = device_add(&session->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "could not register session's dev\n");
++ dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
++ "register session's dev\n");
+ goto release_host;
+ }
+ transport_register_device(&session->dev);
+
+- spin_lock_irqsave(&sesslock, flags);
+- list_add(&session->sess_list, &sesslist);
+- spin_unlock_irqrestore(&sesslock, flags);
+-
+- iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
+ return 0;
+
+ release_host:
+@@ -750,18 +328,17 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+- * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -773,65 +350,19 @@ iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_create_session);
+
+-static void iscsi_conn_release(struct device *dev)
+-{
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+- struct device *parent = conn->dev.parent;
+-
+- kfree(conn);
+- put_device(parent);
+-}
+-
+-static int iscsi_is_conn_dev(const struct device *dev)
+-{
+- return dev->release == iscsi_conn_release;
+-}
+-
+-static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+-{
+- if (!iscsi_is_conn_dev(dev))
+- return 0;
+- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+-}
+-
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&sesslock, flags);
+- list_del(&session->sess_list);
+- spin_unlock_irqrestore(&sesslock, flags);
++ struct iscsi_host *ihost = shost->shost_data;
+
+- /* make sure there are no blocks/unblocks queued */
+- flush_workqueue(iscsi_eh_timer_workq);
+- /* make sure the timedout callout is not running */
+ if (!cancel_delayed_work(&session->recovery_work))
+- flush_workqueue(iscsi_eh_timer_workq);
+- /*
+- * If we are blocked let commands flow again. The lld or iscsi
+- * layer should set up the queuecommand to fail commands.
+- * We assume that LLD will not be calling block/unblock while
+- * removing the session.
+- */
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FREE;
+- spin_unlock_irqrestore(&session->lock, flags);
++ flush_scheduled_work();
+
+- scsi_target_unblock(&session->dev);
+- /* flush running scans then delete devices */
+- flush_workqueue(ihost->scan_workq);
+- __iscsi_unbind_session(&session->unbind_work);
++ mutex_lock(&ihost->mutex);
++ list_del(&session->host_list);
++ mutex_unlock(&ihost->mutex);
+
+- /* hw iscsi may not have removed all connections from session */
+- err = device_for_each_child(&session->dev, NULL,
+- iscsi_iter_destroy_conn_fn);
+- if (err)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Could not delete all connections "
+- "for session. Error %d.\n", err);
++ scsi_remove_target(&session->dev);
+
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+@@ -840,9 +371,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
+
+ void iscsi_free_session(struct iscsi_cls_session *session)
+ {
+- iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
+ put_device(&session->dev);
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_free_session);
+
+ /**
+@@ -851,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
+ *
+ * Can be called by a LLD or iscsi_transport. There must not be
+ * any running connections.
+- */
++ **/
+ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ {
+ iscsi_remove_session(session);
+@@ -860,10 +391,23 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+
++static void iscsi_conn_release(struct device *dev)
++{
++ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
++ struct device *parent = conn->dev.parent;
++
++ kfree(conn);
++ put_device(parent);
++}
++
++static int iscsi_is_conn_dev(const struct device *dev)
++{
++ return dev->release == iscsi_conn_release;
++}
++
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -874,19 +418,19 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * for software iscsi we could be trying to preallocate a connection struct
+ * in which case there could be two connection structs and cid would be
+ * non-zero.
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+- unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -903,16 +447,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ conn->dev.release = iscsi_conn_release;
+ err = device_register(&conn->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session, "could not "
+- "register connection's dev\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
++ "connection's dev\n");
+ goto release_parent_ref;
+ }
+ transport_register_device(&conn->dev);
+-
+- spin_lock_irqsave(&connlock, flags);
+- list_add(&conn->conn_list, &connlist);
+- conn->active = 1;
+- spin_unlock_irqrestore(&connlock, flags);
+ return conn;
+
+ release_parent_ref:
+@@ -926,23 +465,17 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
+
+ /**
+ * iscsi_destroy_conn - destroy iscsi class connection
+- * @conn: iscsi cls session
++ * @conn: iscsi cls connection
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&connlock, flags);
+- conn->active = 0;
+- list_del(&conn->conn_list);
+- spin_unlock_irqrestore(&connlock, flags);
+-
+ transport_unregister_device(&conn->dev);
+ device_unregister(&conn->dev);
+ return 0;
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+
+ /*
+@@ -1011,8 +544,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+- "control PDU: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
++ "control PDU: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1045,8 +578,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+- "conn error (%d)\n", error);
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
++ "conn error (%d)\n", error);
+ return;
+ }
+
+@@ -1060,8 +593,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ iscsi_broadcast_skb(skb, GFP_ATOMIC);
+
+- iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+- error);
++ dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
++ error);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_error);
+
+@@ -1076,10 +609,12 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
+ int t = done ? NLMSG_DONE : type;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+- if (!skb) {
+- printk(KERN_ERR "Could not allocate skb to send reply.\n");
+- return -ENOMEM;
+- }
++ /*
++ * FIXME:
++ * user is supposed to react on iferror == -ENOMEM;
++ * see iscsi_if_rx().
++ */
++ BUG_ON(!skb);
+
+ nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+ nlh->nlmsg_flags = flags;
+@@ -1116,8 +651,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+
+ skbstat = alloc_skb(len, GFP_ATOMIC);
+ if (!skbstat) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+- "deliver stats: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
++ "deliver stats: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1152,87 +687,145 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+ }
+
+ /**
+- * iscsi_session_event - send session destr. completion event
+- * @session: iscsi class session
+- * @event: type of event
+- */
+-int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event)
++ * iscsi_if_destroy_session_done - send session destr. completion event
++ * @conn: last connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * removed a session.
++ **/
++int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ {
+ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_uevent *ev;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
++ unsigned long flags;
+ int rc, len = NLMSG_SPACE(sizeof(*ev));
+
+- priv = iscsi_if_transport_lookup(session->transport);
++ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
+ shost = iscsi_session_to_shost(session);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u\n", event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+- ev->transport_handle = iscsi_handle(session->transport);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_KEVENT_DESTROY_SESSION;
++ ev->r.d_session.host_no = shost->host_no;
++ ev->r.d_session.sid = session->sid;
+
+- ev->type = event;
+- switch (event) {
+- case ISCSI_KEVENT_DESTROY_SESSION:
+- ev->r.d_session.host_no = shost->host_no;
+- ev->r.d_session.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_CREATE_SESSION:
+- ev->r.c_session_ret.host_no = shost->host_no;
+- ev->r.c_session_ret.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_UNBIND_SESSION:
+- ev->r.unbind_session.host_no = shost->host_no;
+- ev->r.unbind_session.sid = session->sid;
+- break;
+- default:
+- iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+- "%u.\n", event);
+- kfree_skb(skb);
++ /*
++ * this will occur if the daemon is not up, so we just warn
++ * the user and when the daemon is restarted it will handle it
++ */
++ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
++ if (rc < 0)
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session destruction event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
++
++/**
++ * iscsi_if_create_session_done - send session creation completion event
++ * @conn: leading connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * created a session or an existing session is back in the logged in state.
++ **/
++int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
++{
++ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
++ struct Scsi_Host *shost;
++ struct iscsi_uevent *ev;
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++ unsigned long flags;
++ int rc, len = NLMSG_SPACE(sizeof(*ev));
++
++ priv = iscsi_if_transport_lookup(conn->transport);
++ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
++ shost = iscsi_session_to_shost(session);
++
++ skb = alloc_skb(len, GFP_KERNEL);
++ if (!skb) {
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
++ return -ENOMEM;
+ }
+
++ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
++ ev = NLMSG_DATA(nlh);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_UEVENT_CREATE_SESSION;
++ ev->r.c_session_ret.host_no = shost->host_no;
++ ev->r.c_session_ret.sid = session->sid;
++
+ /*
+ * this will occur if the daemon is not up, so we just warn
+ * the user and when the daemon is restarted it will handle it
+ */
+ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+ if (rc < 0)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u. Check iscsi daemon\n",
+- event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_session_event);
++EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ unsigned long flags;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1242,34 +835,47 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
++ unsigned long flags;
+
+ session = iscsi_session_lookup(ev->u.c_conn.sid);
+ if (!session) {
+- printk(KERN_ERR "iscsi: invalid session %d.\n",
++ printk(KERN_ERR "iscsi: invalid session %d\n",
+ ev->u.c_conn.sid);
+ return -EINVAL;
+ }
+
+ conn = transport->create_conn(session, ev->u.c_conn.cid);
+ if (!conn) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "couldn't create a new connection.");
++ printk(KERN_ERR "iscsi: couldn't create a new "
++ "connection for session %d\n",
++ session->sid);
+ return -ENOMEM;
+ }
+
+ ev->r.c_conn_ret.sid = session->sid;
+ ev->r.c_conn_ret.cid = conn->cid;
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
++
+ return 0;
+ }
+
+ static int
+ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
++ unsigned long flags;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ if (!conn)
+ return -EINVAL;
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
+
+ if (transport->destroy_conn)
+ transport->destroy_conn(conn);
+@@ -1307,7 +913,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +922,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1353,50 +947,15 @@ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+ {
+- struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+- int err;
+
+ if (!transport->tgt_dscvr)
+ return -EINVAL;
+
+- shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "target discovery could not find host no %u\n",
+- ev->u.tgt_dscvr.host_no);
+- return -ENODEV;
+- }
+-
+-
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+- ev->u.tgt_dscvr.enable, dst_addr);
+- scsi_host_put(shost);
+- return err;
+-}
+-
+-static int
+-iscsi_set_host_param(struct iscsi_transport *transport,
+- struct iscsi_uevent *ev)
+-{
+- char *data = (char*)ev + sizeof(*ev);
+- struct Scsi_Host *shost;
+- int err;
+-
+- if (!transport->set_host_param)
+- return -ENOSYS;
+-
+- shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "set_host_param could not find host no %u\n",
+- ev->u.set_host_param.host_no);
+- return -ENODEV;
+- }
+-
+- err = transport->set_host_param(shost, ev->u.set_host_param.param,
+- data, ev->u.set_host_param.len);
+- scsi_host_put(shost);
+- return err;
++ return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
++ ev->u.tgt_dscvr.host_no,
++ ev->u.tgt_dscvr.enable, dst_addr);
+ }
+
+ static int
+@@ -1408,7 +967,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
++ unsigned long flags;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,35 +981,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
++ if (session) {
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
+ transport->destroy_session(session);
+- else
+- err = -EINVAL;
+- break;
+- case ISCSI_UEVENT_UNBIND_SESSION:
+- session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
+- iscsi_unbind_session(session);
+- else
++ } else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_CREATE_CONN:
+@@ -1508,11 +1049,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ case ISCSI_UEVENT_TGT_DSCVR:
+ err = iscsi_tgt_dscvr(transport, ev);
+ break;
+- case ISCSI_UEVENT_SET_HOST_PARAM:
+- err = iscsi_set_host_param(transport, ev);
+- break;
+ default:
+- err = -ENOSYS;
++ err = -EINVAL;
+ break;
+ }
+
+@@ -1521,55 +1059,70 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ }
+
+ /*
+- * Get message from skb. Each message is processed by iscsi_if_recv_msg.
+- * Malformed skbs with wrong lengths or invalid creds are not processed.
++ * Get message from skb (based on rtnetlink_rcv_skb). Each message is
++ * processed by iscsi_if_recv_msg. Malformed skbs with wrong lengths or
++ * invalid creds are discarded silently.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1130,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1601,68 +1153,43 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
+ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
+ iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
+ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+-iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+-iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
++
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
+
+ /*
+ * iSCSI session attrs
+ */
+-#define iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr_show(param) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+- \
+- if (perm && !capable(CAP_SYS_ADMIN)) \
+- return -EACCES; \
+ return t->get_session_param(session, param, buf); \
+ }
+
+-#define iscsi_session_attr(field, param, perm) \
+- iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr(field, param) \
++ iscsi_session_attr_show(param) \
+ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
+ NULL);
+
+-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+-iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+-iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+-iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+-iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+-iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+-iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+-iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+-iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+-
+-static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
+-{
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+-}
+-static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+- NULL);
++iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
++iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
++iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
++iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
++iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
++iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
++iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
++iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
++iscsi_session_attr(erl, ISCSI_PARAM_ERL);
++iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1672,32 +1199,9 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
+ NULL)
+ iscsi_priv_session_attr(recovery_tmo, "%d");
+
+-/*
+- * iSCSI host attrs
+- */
+-#define iscsi_host_attr_show(param) \
+-static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
+-{ \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
+- struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+- return priv->iscsi_transport->get_host_param(shost, param, buf); \
+-}
+-
+-#define iscsi_host_attr(field, param) \
+- iscsi_host_attr_show(param) \
+-static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+- NULL);
+-
+-iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+-iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+-iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+-iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+-
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1209,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,15 +1217,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
+- count++; \
+- } \
+-} while (0)
+-
+-#define SETUP_HOST_RD_ATTR(field, param_flag) \
+-do { \
+- if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,34 +1307,25 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
++ priv->host_attrs[0] = NULL;
+ transport_container_register(&priv->t.host_attrs);
+
+- SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+- SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+- SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+- SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+- BUG_ON(count > ISCSI_HOST_ATTRS);
+- priv->host_attrs[count] = NULL;
+- count = 0;
+-
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+ priv->conn_cont.ac.class = &iscsi_connection_class.class;
+@@ -1856,8 +1343,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
+ SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
+ SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+- SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+- SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
+
+ BUG_ON(count > ISCSI_CONN_ATTRS);
+ priv->conn_attrs[count] = NULL;
+@@ -1879,17 +1364,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
+ SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
+ SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+- SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
+- SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
+- SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
+- SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+- SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+- SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+- SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+- SETUP_PRIV_SESSION_RD_ATTR(state);
+
+ BUG_ON(count > ISCSI_SESSION_ATTRS);
+ priv->session_attrs[count] = NULL;
+@@ -1901,9 +1376,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1404,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1425,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,29 +1437,21 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+ }
+
+- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+- if (!iscsi_eh_timer_workq)
+- goto release_nls;
+-
+ return 0;
+
+-release_nls:
+- netlink_kernel_release(nls);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -1997,12 +1459,10 @@ unregister_transport_class:
+
+ static void __exit iscsi_transport_exit(void)
+ {
+- destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..55ebf03 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -48,17 +48,12 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
+
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+- ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+- ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+ ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
+ ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
+ ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
+- ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
+- ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
+ };
+
+ enum iscsi_tgt_dscvr {
+@@ -76,15 +71,7 @@ struct iscsi_uevent {
+ /* messages u -> k */
+ struct msg_create_session {
+ uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -149,11 +136,6 @@ struct iscsi_uevent {
+ */
+ uint32_t enable;
+ } tgt_dscvr;
+- struct msg_set_host_param {
+- uint32_t host_no;
+- uint32_t param; /* enum iscsi_host_param */
+- uint32_t len;
+- } set_host_param;
+ } u;
+ union {
+ /* messages k -> u */
+@@ -166,10 +148,6 @@ struct iscsi_uevent {
+ uint32_t sid;
+ uint32_t cid;
+ } c_conn_ret;
+- struct msg_unbind_session {
+- uint32_t sid;
+- uint32_t host_no;
+- } unbind_session;
+ struct msg_recv_req {
+ uint32_t sid;
+ uint32_t cid;
+@@ -245,78 +223,42 @@ enum iscsi_param {
+ ISCSI_PARAM_CONN_PORT,
+ ISCSI_PARAM_CONN_ADDRESS,
+
+- ISCSI_PARAM_USERNAME,
+- ISCSI_PARAM_USERNAME_IN,
+- ISCSI_PARAM_PASSWORD,
+- ISCSI_PARAM_PASSWORD_IN,
+-
+- ISCSI_PARAM_FAST_ABORT,
+- ISCSI_PARAM_ABORT_TMO,
+- ISCSI_PARAM_LU_RESET_TMO,
+- ISCSI_PARAM_HOST_RESET_TMO,
+-
+- ISCSI_PARAM_PING_TMO,
+- ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
+-
+-/* iSCSI HBA params */
+-enum iscsi_host_param {
+- ISCSI_HOST_PARAM_HWADDRESS,
+- ISCSI_HOST_PARAM_INITIATOR_NAME,
+- ISCSI_HOST_PARAM_NETDEV_NAME,
+- ISCSI_HOST_PARAM_IPADDRESS,
+- ISCSI_HOST_PARAM_MAX,
+-};
+-
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+@@ -330,9 +272,6 @@ enum iscsi_host_param {
+ #define CAP_MULTI_CONN 0x40
+ #define CAP_TEXT_NEGO 0x80
+ #define CAP_MARKERS 0x100
+-#define CAP_FW_DB 0x200
+-#define CAP_SENDTARGETS_OFFLOAD 0x400
+-#define CAP_DATA_PATH_OFFLOAD 0x800
+
+ /*
+ * These flags describes reason of stop_conn() call
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index f2a2c11..8d1e4e8 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -21,16 +21,13 @@
+ #ifndef ISCSI_PROTO_H
+ #define ISCSI_PROTO_H
+
+-#include <linux/types.h>
+-#include <scsi/scsi.h>
+-
+ #define ISCSI_DRAFT20_VERSION 0x00
+
+ /* default iSCSI listen port for incoming connections */
+ #define ISCSI_LISTEN_PORT 3260
+
+ /* Padding word length */
+-#define ISCSI_PAD_LEN 4
++#define PAD_WORD_LEN 4
+
+ /*
+ * useful common(control and data pathes) macro
+@@ -46,8 +43,8 @@
+ /* initiator tags; opaque for target */
+ typedef uint32_t __bitwise__ itt_t;
+ /* below makes sense only for initiator that created this tag */
+-#define build_itt(itt, age) ((__force itt_t)\
+- ((itt) | ((age) << ISCSI_AGE_SHIFT)))
++#define build_itt(itt, id, age) ((__force itt_t)\
++ ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
+ #define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
+ #define RESERVED_ITT ((__force itt_t)0xffffffff)
+
+@@ -113,7 +110,6 @@ struct iscsi_ahs_hdr {
+
+ #define ISCSI_AHSTYPE_CDB 1
+ #define ISCSI_AHSTYPE_RLENGTH 2
+-#define ISCSI_CDB_SIZE 16
+
+ /* iSCSI PDU Header */
+ struct iscsi_cmd {
+@@ -127,7 +123,7 @@ struct iscsi_cmd {
+ __be32 data_length;
+ __be32 cmdsn;
+ __be32 exp_statsn;
+- uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */
++ uint8_t cdb[16]; /* SCSI Command Block */
+ /* Additional Data (Command Dependent) */
+ };
+
+@@ -151,15 +147,6 @@ struct iscsi_rlength_ahdr {
+ __be32 read_length;
+ };
+
+-/* Extended CDB AHS */
+-struct iscsi_ecdb_ahdr {
+- __be16 ahslength; /* CDB length - 15, including reserved byte */
+- uint8_t ahstype;
+- uint8_t reserved;
+- /* 4-byte aligned extended CDB spillover */
+- uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
+-};
+-
+ /* SCSI Response Header */
+ struct iscsi_cmd_rsp {
+ uint8_t opcode;
+@@ -613,8 +600,6 @@ struct iscsi_reject {
+ #define ISCSI_MIN_MAX_BURST_LEN 512
+ #define ISCSI_MAX_MAX_BURST_LEN 16777215
+
+-#define ISCSI_DEF_TIME2WAIT 2
+-
+ /************************* RFC 3720 End *****************************/
+
+ #endif /* ISCSI_PROTO_H */
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..ea0816d 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -51,68 +48,69 @@ struct device;
+ #define debug_scsi(fmt...)
+ #endif
+
+-#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */
++#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
++#define ISCSI_CONN_MAX 1
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+
+ /* Task Mgmt states */
+-enum {
+- TMF_INITIAL,
+- TMF_QUEUED,
+- TMF_SUCCESS,
+- TMF_FAILED,
+- TMF_TIMEDOUT,
+- TMF_NOT_FOUND,
+-};
++#define TMABORT_INITIAL 0x0
++#define TMABORT_SUCCESS 0x1
++#define TMABORT_FAILED 0x2
++#define TMABORT_TIMEDOUT 0x3
++#define TMABORT_NOT_FOUND 0x4
+
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
++#define ISCSI_CID_SHIFT 12
++#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+-#define ISCSI_ADDRESS_BUF_LEN 64
+-
+-enum {
+- /* this is the maximum possible storage for AHSs */
+- ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) +
+- sizeof(struct iscsi_rlength_ahdr),
+- ISCSI_DIGEST_SIZE = sizeof(__u32),
++struct iscsi_mgmt_task {
++ /*
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ int data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
+ };
+
+-
+ enum {
+ ISCSI_TASK_COMPLETED,
+ ISCSI_TASK_PENDING,
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+- * Because LLDs allocate their hdr differently, this is a pointer
+- * and length to that storage. It must be setup at session
+- * creation time.
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
+ */
+ struct iscsi_cmd *hdr;
+- unsigned short hdr_max;
+- unsigned short hdr_len; /* accumulated size of hdr used */
+ int itt; /* this ITT */
++ int datasn; /* DataSN */
+
+ uint32_t unsol_datasn;
+- unsigned imm_count; /* imm-data (bytes) */
+- unsigned unsol_count; /* unsolicited (bytes)*/
++ int imm_count; /* imm-data (bytes) */
++ int unsol_count; /* unsolicited (bytes)*/
+ /* offset in unsolicited stream (bytes); */
+- unsigned unsol_offset;
+- unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
++ int unsol_offset;
++ int data_count; /* remaining Data-Out */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
++ int total_length;
+ struct iscsi_conn *conn; /* used connection */
++ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
+
+ /* state set/tested under session->lock */
+ int state;
+@@ -121,33 +119,19 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
+-{
+- return (void*)task->hdr + task->hdr_len;
+-}
+-
+-/* Connection's states */
+-enum {
+- ISCSI_CONN_INITIAL_STAGE,
+- ISCSI_CONN_STARTED,
+- ISCSI_CONN_STOPPED,
+- ISCSI_CONN_CLEANUP_WAIT,
+-};
+-
+ struct iscsi_conn {
+ struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+- struct timer_list transport_timer;
+- unsigned long last_recv;
+- unsigned long last_ping;
+- int ping_timeout;
+- int recv_timeout;
+- struct iscsi_task *ping_task;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,28 +147,35 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+- struct list_head mgmtqueue; /* mgmt (control) xmit queue */
++ struct kfifo *immqueue; /* immediate xmit queue */
++ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head mgmt_run_list; /* list of control tasks */
+ struct list_head xmitqueue; /* data-path cmd queue */
+ struct list_head run_list; /* list of cmds in progress */
+- struct list_head requeue; /* tasks needing another run */
+ struct work_struct xmitwork; /* per-conn. xmit workqueue */
++ /*
++ * serializes connection xmit, access to kfifos:
++ * xmitqueue, immqueue, mgmtqueue
++ */
++ struct mutex xmitmutex;
++
+ unsigned long suspend_tx; /* suspend Tx */
+ unsigned long suspend_rx; /* suspend Rx */
+
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+- struct timer_list tmf_timer;
+- int tmf_state; /* see TMF_INITIAL, etc.*/
++ struct timer_list tmabort_timer;
++ int tmabort_state; /* see TMABORT_INITIAL, etc.*/
+
+ /* negotiated params */
+- unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
+- unsigned max_xmit_dlength; /* target_max_recv_dsl */
++ int max_recv_dlength; /* initiator_max_recv_dsl*/
++ int max_xmit_dlength; /* target_max_recv_dsl */
+ int hdrdgst_en;
+ int datadgst_en;
+ int ifmarker_en;
+@@ -192,9 +183,6 @@ struct iscsi_conn {
+ /* values userspace uses to id a conn */
+ int persistent_port;
+ char *persistent_address;
+- /* remote portal currently connected to */
+- int portal_port;
+- char portal_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,65 +197,34 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+-struct iscsi_pool {
++struct iscsi_queue {
+ struct kfifo *queue; /* FIFO Queue */
+ void **pool; /* Pool of elements */
+ int max; /* Max number of elements */
+ };
+
+-/* Session's states */
+-enum {
+- ISCSI_STATE_FREE = 1,
+- ISCSI_STATE_LOGGED_IN,
+- ISCSI_STATE_FAILED,
+- ISCSI_STATE_TERMINATE,
+- ISCSI_STATE_IN_RECOVERY,
+- ISCSI_STATE_RECOVERY_FAILED,
+- ISCSI_STATE_LOGGING_OUT,
+-};
+-
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+- /*
+- * Syncs up the scsi eh thread with the iscsi eh thread when sending
+- * task management functions. This must be taken before the session
+- * and recv lock.
+- */
+- struct mutex eh_mutex;
+-
+ /* iSCSI session-wide sequencing */
+ uint32_t cmdsn;
+ uint32_t exp_cmdsn;
+ uint32_t max_cmdsn;
+
+- /* This tracks the reqs queued into the initiator */
+- uint32_t queued_cmdsn;
+-
+ /* configuration */
+- int abort_timeout;
+- int lu_reset_timeout;
+ int initial_r2t_en;
+- unsigned max_r2t;
++ int max_r2t;
+ int imm_data_en;
+- unsigned first_burst;
+- unsigned max_burst;
++ int first_burst;
++ int max_burst;
+ int time2wait;
+ int time2retain;
+ int pdu_inorder_en;
+ int dataseq_inorder_en;
+ int erl;
+- int fast_abort;
+ int tpgt;
+- char *username;
+- char *username_in;
+- char *password;
+- char *password_in;
+ char *targetname;
+- char *ifacename;
+- char *initiatorname;
++
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +238,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
+- struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
++ struct iscsi_queue cmdpool; /* PDU's pool */
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -303,48 +252,31 @@ struct iscsi_host {
+ extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
+ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
+ extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
+-extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
+ /*
+- * iSCSI host helpers.
+- */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+-extern int iscsi_host_set_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+-extern int iscsi_host_get_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+-
+-/*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
+-#define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,17 +285,13 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+-
+-#define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+-extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern int iscsi_check_assign_cmdsn(struct iscsi_session *,
++ struct iscsi_nopin *);
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+@@ -371,34 +299,13 @@ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
+
+ /*
+ * generic helpers
+ */
+-extern void iscsi_pool_free(struct iscsi_pool *);
+-extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+-
+-/*
+- * inline functions to deal with padding.
+- */
+-static inline unsigned int
+-iscsi_padded(unsigned int len)
+-{
+- return (len + ISCSI_PAD_LEN - 1) & ~(ISCSI_PAD_LEN - 1);
+-}
+-
+-static inline unsigned int
+-iscsi_padding(unsigned int len)
+-{
+- len &= (ISCSI_PAD_LEN - 1);
+- if (len)
+- len = ISCSI_PAD_LEN - len;
+- return len;
+-}
++extern void iscsi_pool_free(struct iscsi_queue *, void **);
++extern int iscsi_pool_init(struct iscsi_queue *, int, void ***, int);
+
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..d5c218d 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -24,17 +24,15 @@
+ #define SCSI_TRANSPORT_ISCSI_H
+
+ #include <linux/device.h>
+-#include <linux/list.h>
+-#include <linux/mutex.h>
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +56,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize an iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize an iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -84,11 +79,17 @@ struct iscsi_transport {
+ char *name;
+ unsigned int caps;
+ /* LLD sets this to indicate what values it can export to sysfs */
+- uint64_t param_mask;
+- uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ unsigned int param_mask;
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -104,25 +105,26 @@ struct iscsi_transport {
+ enum iscsi_param param, char *buf);
+ int (*get_session_param) (struct iscsi_cls_session *session,
+ enum iscsi_param param, char *buf);
+- int (*get_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+- int (*set_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+ int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
+- int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
++ int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+
+@@ -139,6 +141,13 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
+ extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+
++
++/* Connection's states */
++#define ISCSI_CONN_INITIAL_STAGE 0
++#define ISCSI_CONN_STARTED 1
++#define ISCSI_CONN_STOPPED 2
++#define ISCSI_CONN_CLEANUP_WAIT 3
++
+ struct iscsi_cls_conn {
+ struct list_head conn_list; /* item in connlist */
+ void *dd_data; /* LLD private data */
+@@ -152,34 +161,25 @@ struct iscsi_cls_conn {
+ #define iscsi_dev_to_conn(_dev) \
+ container_of(_dev, struct iscsi_cls_conn, dev)
+
+-#define iscsi_conn_to_session(_conn) \
+- iscsi_dev_to_session(_conn->dev.parent)
+-
+-/* iscsi class session state */
+-enum {
+- ISCSI_SESSION_LOGGED_IN,
+- ISCSI_SESSION_FAILED,
+- ISCSI_SESSION_FREE,
+-};
+-
+-#define ISCSI_MAX_TARGET -1
++/* Session's states */
++#define ISCSI_STATE_FREE 1
++#define ISCSI_STATE_LOGGED_IN 2
++#define ISCSI_STATE_FAILED 3
++#define ISCSI_STATE_TERMINATE 4
++#define ISCSI_STATE_IN_RECOVERY 5
++#define ISCSI_STATE_RECOVERY_FAILED 6
+
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+- spinlock_t lock;
+- struct work_struct block_work;
+- struct work_struct unblock_work;
+- struct work_struct scan_work;
+- struct work_struct unbind_work;
+
+ /* recovery fields */
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+- int state;
+ int sid; /* session id */
+ void *dd_data; /* LLD private data */
+ struct device dev; /* sysfs transport/container device */
+@@ -194,53 +194,31 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
+- atomic_t nr_scans;
++struct iscsi_host {
++ struct list_head sessions;
+ struct mutex mutex;
+- struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
+ };
+
+ /*
+ * session and connection functions that can be used by HW iSCSI LLDs
+ */
+-#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
+- dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
+-
+-#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
+- dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
+-
+-extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+-extern int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event);
++extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
++extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
++
+
+ #endif
+--
+1.5.3.8
+
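The backported include/scsi/iscsi_if.h and libiscsi.h hunks above bring back the older private-data layout: the transport class keeps the iscsi_cls_session pointer in the first unsigned long of the Scsi_Host hostdata, hostdata_session() reads it back, iscsi_hostdata() returns the LLD session area right behind it, and session_to_cls() in libiscsi.h is built on the same trick. As a rough illustration only (not part of the patch series), here is a minimal userspace sketch of that layout; the macros are copied from the header hunk above, while the struct contents, the file name and the gcc invocation are made-up stand-ins, and the void-pointer arithmetic in iscsi_hostdata() relies on the GNU C extension the kernel uses, so it assumes gcc.

/* layout.c - hedged sketch of the hostdata layout restored by the backport. */
/* Assumed build command (not from the patch): gcc -o layout layout.c        */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Copied from the backported include/scsi/iscsi_if.h above. */
#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))

/* Stand-ins for iscsi_cls_session and iscsi_session; fields are invented. */
struct fake_cls_session { int sid; };
struct fake_session { uint32_t cmdsn; };

int main(void)
{
	struct fake_cls_session cls = { .sid = 7 };
	/* hostdata layout: [unsigned long = class session ptr][LLD session] */
	void *hostdata = calloc(1, sizeof(unsigned long) +
				   sizeof(struct fake_session));
	struct fake_session *session;

	if (!hostdata)
		return 1;

	*(unsigned long *)hostdata = (unsigned long)&cls;	/* class ptr   */
	session = iscsi_hostdata(hostdata);			/* LLD portion */
	session->cmdsn = 42;

	printf("sid=%d cmdsn=%u\n",
	       ((struct fake_cls_session *)hostdata_session(hostdata))->sid,
	       session->cmdsn);
	free(hostdata);
	return 0;
}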
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_02_add_to_2_6_9.patch b/kernel_patches/backport/2.6.9_U4/iscsi_02_add_to_2_6_9.patch
new file mode 100644
index 0000000..1f05d95
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_02_add_to_2_6_9.patch
@@ -0,0 +1,180 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 4376840..11dfaf9 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2145,7 +2145,6 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ static struct scsi_host_template iscsi_sht = {
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index d37048c..60f5846 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1366,7 +1366,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ shost->max_lun = iscsit->max_lun;
+ shost->max_cmd_len = iscsit->max_cmd_len;
+ shost->transportt = scsit;
+- shost->transportt->create_work_queue = 1;
+ *hostno = shost->host_no;
+
+ session = iscsi_hostdata(shost->hostdata);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 8133c22..f1c68f7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -65,6 +65,8 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define cdev_to_iscsi_internal(_cdev) \
+ container_of(_cdev, struct iscsi_internal, cdev)
+
++extern int attribute_container_init(void);
++
+ static void iscsi_transport_release(struct class_device *cdev)
+ {
+ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+@@ -80,6 +82,17 @@ static struct class iscsi_transport_class = {
+ .release = iscsi_transport_release,
+ };
+
++static void iscsi_host_class_release(struct class_device *class_dev)
++{
++ struct Scsi_Host *shost = transport_class_to_shost(class_dev);
++ put_device(&shost->shost_gendev);
++}
++
++struct class iscsi_host_class = {
++ .name = "iscsi_host",
++ .release = iscsi_host_class_release,
++};
++
+ static ssize_t
+ show_transport_handle(struct class_device *cdev, char *buf)
+ {
+@@ -115,10 +128,8 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct class_device *cdev)
++static int iscsi_setup_host(struct Scsi_Host *shost)
+ {
+- struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+@@ -127,12 +138,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ return 0;
+ }
+
+-static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+- "iscsi_host",
+- iscsi_setup_host,
+- NULL,
+- NULL);
+-
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+ "iscsi_session",
+ NULL,
+@@ -216,24 +221,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_host *ihost = shost->shost_data;
+- struct iscsi_cls_session *session;
+-
+- mutex_lock(&ihost->mutex);
+- list_for_each_entry(session, &ihost->sessions, host_list) {
+- if ((channel == SCAN_WILD_CARD || channel == 0) &&
+- (id == SCAN_WILD_CARD || id == session->target_id))
+- scsi_scan_target(&session->dev, 0,
+- session->target_id, lun, 1);
+- }
+- mutex_unlock(&ihost->mutex);
+-
+- return 0;
+-}
+-
+ static void session_recovery_timedout(struct work_struct *work)
+ {
+ struct iscsi_cls_session *session =
+@@ -362,8 +349,6 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ list_del(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+- scsi_remove_target(&session->dev);
+-
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+ }
+@@ -1269,24 +1254,6 @@ static int iscsi_conn_match(struct attribute_container *cont,
+ return &priv->conn_cont.ac == cont;
+ }
+
+-static int iscsi_host_match(struct attribute_container *cont,
+- struct device *dev)
+-{
+- struct Scsi_Host *shost;
+- struct iscsi_internal *priv;
+-
+- if (!scsi_is_host_device(dev))
+- return 0;
+-
+- shost = dev_to_shost(dev);
+- if (!shost->transportt ||
+- shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
+- return 0;
+-
+- priv = to_iscsi_internal(shost->transportt);
+- return &priv->t.host_attrs.ac == cont;
+-}
+-
+ struct scsi_transport_template *
+ iscsi_register_transport(struct iscsi_transport *tt)
+ {
+@@ -1306,7 +1273,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ INIT_LIST_HEAD(&priv->list);
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+- priv->t.user_scan = iscsi_user_scan;
+
+ priv->cdev.class = &iscsi_transport_class;
+ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
+@@ -1319,12 +1285,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ goto unregister_cdev;
+
+ /* host parameters */
+- priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+- priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+- priv->t.host_attrs.ac.match = iscsi_host_match;
++ priv->t.host_attrs = &priv->host_attrs[0];
++ priv->t.host_class = &iscsi_host_class;
++ priv->t.host_setup = iscsi_setup_host;
+ priv->t.host_size = sizeof(struct iscsi_host);
+- priv->host_attrs[0] = NULL;
+- transport_container_register(&priv->t.host_attrs);
+
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+@@ -1402,7 +1366,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+
+ transport_container_unregister(&priv->conn_cont);
+ transport_container_unregister(&priv->session_cont);
+- transport_container_unregister(&priv->t.host_attrs);
+
+ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
+ class_device_unregister(&priv->cdev);
+@@ -1420,6 +1420,7 @@ static __init int iscsi_transport_init(void)
+ ISCSI_TRANSPORT_VERSION);
+
+ atomic_set(&iscsi_session_nr, 0);
++ attribute_container_init();
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+ return err;
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_03_add_session_wq.patch b/kernel_patches/backport/2.6.9_U4/iscsi_03_add_session_wq.patch
new file mode 100644
index 0000000..5a77c07
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_03_add_session_wq.patch
@@ -0,0 +1,76 @@
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a6f2303..5d62cc0 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -612,7 +612,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (resume_tx) {
+ iser_dbg("%ld resuming tx\n",jiffies);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ queue_work(conn->session->wq, &conn->xmitwork);
+ }
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index e8020a5..43e9128 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -828,7 +828,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+
+- scsi_queue_work(host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+
+ reject:
+@@ -928,7 +928,7 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ else
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+
+- scsi_queue_work(session->host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+ }
+
+@@ -1415,6 +1415,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ INIT_LIST_HEAD(&mtask->running);
+ }
+
++ session->wq = create_singlethread_workqueue("");
++ BUG_ON(!session->wq);
++
+ if (scsi_add_host(shost, NULL))
+ goto add_host_fail;
+
+@@ -1462,6 +1465,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+
+ kfree(session->targetname);
+
++ destroy_workqueue(session->wq);
++
+ iscsi_destroy_session(cls_session);
+ scsi_host_put(shost);
+ module_put(owner);
+@@ -1595,7 +1600,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- scsi_flush_work(session->host);
++ flush_workqueue(session->wq);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..e8a95f5 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -244,6 +244,8 @@ struct iscsi_session {
+ int mgmtpool_max; /* size of mgmt array */
+ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
+ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
++
++ struct workqueue_struct *wq;
+ };
+
+ /*
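
The 2.6.9 SCSI midlayer has no scsi_queue_work()/scsi_flush_work(), so
iscsi_03_add_session_wq.patch gives every iSCSI session its own
single-threaded workqueue and routes xmitwork through it. Reduced to a
standalone sketch (struct, function and queue names here are illustrative,
not the driver's):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct xmit_session {			/* stands in for struct iscsi_session */
	struct workqueue_struct *wq;
	struct work_struct xmitwork;
};

static int xmit_session_setup(struct xmit_session *s)
{
	/* replaces the midlayer's per-host queue used by scsi_queue_work() */
	s->wq = create_singlethread_workqueue("iscsi_xmit");
	if (!s->wq)
		return -ENOMEM;
	return 0;
}

static void xmit_session_kick(struct xmit_session *s)
{
	/* was: scsi_queue_work(session->host, &conn->xmitwork) */
	queue_work(s->wq, &s->xmitwork);
}

static void xmit_session_teardown(struct xmit_session *s)
{
	flush_workqueue(s->wq);		/* was: scsi_flush_work(session->host) */
	destroy_workqueue(s->wq);
}

In the hunks above the queue is created in iscsi_session_setup(), flushed in
iscsi_conn_teardown() before the connection data is freed, and destroyed in
iscsi_session_teardown().
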
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_04_inet_sock_to_opt.patch b/kernel_patches/backport/2.6.9_U4/iscsi_04_inet_sock_to_opt.patch
new file mode 100644
index 0000000..1fb2376
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_04_inet_sock_to_opt.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 905efc4..f73a743 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2027,7 +2027,7 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct inet_sock *inet;
++ struct inet_opt *inet;
+ struct ipv6_pinfo *np;
+ struct sock *sk;
+ int len;
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_05_release_host_lock_before_eh.patch b/kernel_patches/backport/2.6.9_U4/iscsi_05_release_host_lock_before_eh.patch
new file mode 100644
index 0000000..c994506
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_05_release_host_lock_before_eh.patch
@@ -0,0 +1,60 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 7db081b..211944e 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -968,12 +968,14 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn = session->leadconn;
+ int fail_session = 0;
+
++ spin_unlock_irq(host->host_lock);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+ return FAILED;
+ }
+
+@@ -1005,6 +1007,7 @@ failed:
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+
+ return SUCCESS;
+ }
+@@ -1162,13 +1165,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn;
+ struct iscsi_session *session;
+ int rc;
++ struct Scsi_Host *shost = sc->device->host;
+
++ spin_unlock_irq(shost->host_lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+ }
+
+@@ -1253,6 +1259,7 @@ success_cleanup:
+
+ success_rel_mutex:
+ mutex_unlock(&conn->xmitmutex);
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+
+ failed:
+@@ -1260,6 +1267,7 @@ failed:
+ mutex_unlock(&conn->xmitmutex);
+
+ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ spin_lock_irq(shost->host_lock);
+ return FAILED;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_abort);
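
iscsi_05_release_host_lock_before_eh.patch exists because the 2.6.9 error
handler invokes eh_abort_handler/eh_host_reset_handler with the host lock
held and interrupts disabled, while libiscsi's handlers take mutexes and
sleep. The backport drops the lock on entry and retakes it on every return
path, including the early exits. Schematically (do_sleeping_abort is a
placeholder for the real libiscsi abort logic):

#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int do_sleeping_abort(struct scsi_cmnd *sc)
{
	/* placeholder for the work that takes xmitmutex, waits, etc. */
	return SUCCESS;
}

static int compat_eh_abort(struct scsi_cmnd *sc)
{
	struct Scsi_Host *shost = sc->device->host;
	int ret;

	/* old midlayer calls us with shost->host_lock held, IRQs off */
	spin_unlock_irq(shost->host_lock);

	ret = do_sleeping_abort(sc);

	/* every exit path must retake the lock before returning */
	spin_lock_irq(shost->host_lock);
	return ret;
}

iscsi_eh_host_reset() gets the same bracketing above, with a relock added in
its "failed" early return as well.
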
diff --git a/kernel_patches/backport/2.6.9_U4/iscsi_06_scsi_addons.patch b/kernel_patches/backport/2.6.9_U4/iscsi_06_scsi_addons.patch
new file mode 100644
index 0000000..a114696
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iscsi_06_scsi_addons.patch
@@ -0,0 +1,75 @@
+diff --git a/drivers/scsi/init.c b/drivers/scsi/init.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/init.c
+@@ -0,0 +1 @@
++#include "src/init.c"
+diff --git a/drivers/scsi/attribute_container.c b/drivers/scsi/attribute_container.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/attribute_container.c
+@@ -0,0 +1 @@
++#include "../drivers/base/attribute_container.c"
+diff --git a/drivers/scsi/transport_class.c b/drivers/scsi/transport_class.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/transport_class.c
+@@ -0,0 +1 @@
++#include "../drivers/base/transport_class.c"
+diff --git a/drivers/scsi/klist.c b/drivers/scsi/klist.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/klist.c
+@@ -0,0 +1 @@
++#include "../../lib/klist.c"
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi.c
+@@ -0,0 +1 @@
++#include "src/scsi.c"
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_lib.c
+@@ -0,0 +1 @@
++#include "src/scsi_lib.c"
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_scan.c
+@@ -0,0 +1 @@
++#include "src/scsi_scan.c"
+diff --git a/drivers/scsi/libiscsi_f.c b/drivers/scsi/libiscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/libiscsi_f.c
+@@ -0,0 +1 @@
++#include "libiscsi.c"
+diff --git a/drivers/scsi/scsi_transport_iscsi_f.c b/drivers/scsi/scsi_transport_iscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_transport_iscsi_f.c
+@@ -0,0 +1 @@
++#include "scsi_transport_iscsi.c"
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index e212608..3bf2015 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -3,2 +3,7 @@
+ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
+ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
++
++CFLAGS_attribute_container.o = $(BACKPORT_INCLUDES)/src/
++
++scsi_transport_iscsi-y := scsi_transport_iscsi_f.o scsi.o scsi_lib.o init.o klist.o attribute_container.o transport_class.o
++libiscsi-y := libiscsi_f.o scsi_scan.o
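
iscsi_06_scsi_addons.patch adds no logic of its own: it compiles newer copies
of midlayer and driver-core files into the iSCSI modules by creating one-line
wrapper sources that #include the borrowed file, then linking the wrappers
into scsi_transport_iscsi.o and libiscsi.o through the composite-object lists
in the Makefile. Because the wrapper is built inside this module, the borrowed
code is compiled against the backport's headers ($(BACKPORT_INCLUDES)) rather
than the stock 2.6.9 ones. In general form:

/*
 * drivers/scsi/klist.c (wrapper): nothing but an #include, so lib/klist.c
 * is compiled as part of this module, with this module's include path and
 * CFLAGS, instead of relying on the 2.6.9 base kernel's copy.
 */
#include "../../lib/klist.c"

The Makefile hunk above then folds the wrappers into the existing module
objects, e.g. scsi_transport_iscsi-y picks up attribute_container.o and
transport_class.o alongside the renamed scsi_transport_iscsi_f.o.
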
diff --git a/kernel_patches/backport/2.6.9_U4/iser_00_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.9_U4/iser_00_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..ff5d719
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_00_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From 12b757e92112750b4bc90cf8150d20484d684dcf Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 21 Aug 2008 14:28:56 +0300
+Subject: [PATCH] iser_sync_kernel_code_with_2.6.26
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch b/kernel_patches/backport/2.6.9_U4/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
new file mode 100644
index 0000000..101fdc6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
@@ -0,0 +1,44 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..75ecabe 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -211,10 +211,10 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ int error = 0;
+
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(ctask->sc) == 0);
++ BUG_ON(ctask->sc->request_bufflen == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->itt, ctask->sc->request_bufflen,
+ ctask->imm_count, ctask->unsol_count);
+ }
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 5d62cc0..1ae80d8 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -349,12 +349,18 @@ int iser_send_command(struct iscsi_conn *conn,
+ else
+ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+- if (scsi_sg_count(sc)) { /* using a scatter list */
+- data_buf->buf = scsi_sglist(sc);
+- data_buf->size = scsi_sg_count(sc);
++ if (sc->use_sg) { /* using a scatter list */
++ data_buf->buf = sc->request_buffer;
++ data_buf->size = sc->use_sg;
++ } else if (sc->request_bufflen) {
++ /* using a single buffer - convert it into one entry SG */
++ sg_init_one(&data_buf->sg_single,
++ sc->request_buffer, sc->request_bufflen);
++ data_buf->buf = &data_buf->sg_single;
++ data_buf->size = 1;
+ }
+
+- data_buf->data_len = scsi_bufflen(sc);
++ data_buf->data_len = sc->request_bufflen;
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/kernel_patches/backport/2.6.9_U4/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch b/kernel_patches/backport/2.6.9_U4/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
new file mode 100644
index 0000000..7b21cba
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..933429b 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -586,7 +586,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
diff --git a/kernel_patches/backport/2.6.9_U4/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch b/kernel_patches/backport/2.6.9_U4/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
new file mode 100644
index 0000000..d72eb5a
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
@@ -0,0 +1,74 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..7baac99 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -368,8 +368,7 @@ static struct iscsi_transport iscsi_iser_transport;
+ static struct iscsi_cls_session *
+ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct scsi_transport_template *scsit,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+@@ -380,13 +380,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iser_desc *desc;
+
+- /*
+- * we do not support setting can_queue cmd_per_lun from userspace yet
+- * because we preallocate so many resources
+- */
+ cls_session = iscsi_session_setup(iscsit, scsit,
+- ISCSI_DEF_XMIT_CMDS_MAX,
+- ISCSI_MAX_CMD_PER_LUN,
+ sizeof(struct iscsi_iser_cmd_task),
+ sizeof(struct iser_desc),
+ initial_cmdsn, &hn);
+@@ -550,7 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 1ee867b..671faff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -105,7 +105,7 @@
+ #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
++#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+ ISER_MAX_RX_MISC_PDUS + \
+ ISER_MAX_TX_MISC_PDUS)
+
+@@ -117,7 +117,7 @@
+
+ #define ISER_INFLIGHT_DATAOUTS 8
+
+-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
++#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 654a4dc..f3d8ba5 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -154,8 +154,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ /* make the pool size twice the max number of SCSI commands *
+ * the ML is expected to queue, watermark for unmap at 50% */
+- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
++ params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
++ params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.cache = 0;
+ params.flush_function = NULL;
+ params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/kernel_patches/backport/2.6.9_U4/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch b/kernel_patches/backport/2.6.9_U4/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
new file mode 100644
index 0000000..26fa09c
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
@@ -0,0 +1,38 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 8f7b859..5f82d6c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -134,9 +134,18 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iser_ctask->command_sent = 0;
+ iser_ctask->iser_conn = iser_conn;
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(sc->request_bufflen == 0);
++
++ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
++ ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->unsol_count);
++ }
++
+ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+@@ -210,14 +219,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(ctask->sc->request_bufflen == 0);
+-
+- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, ctask->sc->request_bufflen,
+- ctask->imm_count, ctask->unsol_count);
+- }
+-
+ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ conn->id, ctask->itt);
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch b/kernel_patches/backport/2.6.9_U4/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
new file mode 100644
index 0000000..417415f
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
@@ -0,0 +1,18 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5f82d6c..3a67d76 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -574,11 +574,8 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
diff --git a/kernel_patches/backport/2.6.9_U4/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch b/kernel_patches/backport/2.6.9_U4/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
new file mode 100644
index 0000000..0b1a4c4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index c5941fa..2f4f125 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -140,8 +140,8 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ iser_ctask->iser_conn = iser_conn;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(sc->request_bufflen == 0);
++ BUG_ON(ctask->total_length == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->itt, ctask->total_length, ctask->imm_count,
+ ctask->unsol_count);
+ }
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch b/kernel_patches/backport/2.6.9_U4/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
new file mode 100644
index 0000000..f207af3
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
@@ -0,0 +1,14 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 2f4f125..940bf98 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,8 +576,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_INITIATOR_NAME,
++ .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
diff --git a/kernel_patches/backport/2.6.9_U4/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch b/kernel_patches/backport/2.6.9_U4/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
new file mode 100644
index 0000000..f9dceb1
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
@@ -0,0 +1,22 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 940bf98..6a35eff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,7 +576,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
+@@ -593,9 +593,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
diff --git a/kernel_patches/backport/2.6.9_U4/iser_09_fix_inclusion_order.patch b/kernel_patches/backport/2.6.9_U4/iser_09_fix_inclusion_order.patch
new file mode 100644
index 0000000..3c2a969
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_09_fix_inclusion_order.patch
@@ -0,0 +1,13 @@
+--- linux-2.6.20-rc7-orig/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:13:43.000000000 +0200
++++ linux-2.6.20-rc7/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:14:31.000000000 +0200
+@@ -70,9 +70,8 @@
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+-#include <scsi/scsi_transport_iscsi.h>
+-
+ #include "iscsi_iser.h"
++#include <scsi/scsi_transport_iscsi.h>
+
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
diff --git a/kernel_patches/backport/2.6.9_U4/iser_10_fix_struct_scsi_host_template.patch b/kernel_patches/backport/2.6.9_U4/iser_10_fix_struct_scsi_host_template.patch
new file mode 100644
index 0000000..5b28ac4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_10_fix_struct_scsi_host_template.patch
@@ -0,0 +1,31 @@
+From 828e0ad429b92cf75781770ceb9ef7086f34fde2 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:31:42 +0300
+Subject: [PATCH] fix_struct_scsi_host_template
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 9bf24c6..de1e783 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -542,13 +542,11 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "iscsi_iser",
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_11_add_fmr_unalign_cnt.patch b/kernel_patches/backport/2.6.9_U4/iser_11_add_fmr_unalign_cnt.patch
new file mode 100644
index 0000000..ef2a2d6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_11_add_fmr_unalign_cnt.patch
@@ -0,0 +1,25 @@
+From 1255c8e5209ce19644e83e353c260f2eddc62cca Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:54:57 +0300
+Subject: [PATCH] add fmr_unalign_cnt to struct iscsi_conn
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/libiscsi.h | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..182421f 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -197,6 +197,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_queue {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_12_remove_hdr_max.patch b/kernel_patches/backport/2.6.9_U4/iser_12_remove_hdr_max.patch
new file mode 100644
index 0000000..c475001
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_12_remove_hdr_max.patch
@@ -0,0 +1,25 @@
+From 97672ef8a29da5e16774d1de9527b2cc29415e36 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:59:16 +0300
+Subject: [PATCH] remove hdr_max
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index de1e783..6451e9d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -394,7 +394,6 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ ctask = session->cmds[i];
+ iser_ctask = ctask->dd_data;
+ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+- ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+
+ for (i = 0; i < session->mgmtpool_max; i++) {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_13_fix_netlink_kernel_create.patch b/kernel_patches/backport/2.6.9_U4/iser_13_fix_netlink_kernel_create.patch
new file mode 100644
index 0000000..d47df44
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_13_fix_netlink_kernel_create.patch
@@ -0,0 +1,26 @@
+From db61fe2c3062d8918e793ddc7e1a8cc3694bf620 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:20:42 +0300
+Subject: [PATCH] fix netlink_kernel_create
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e969ef7..a2f4fb7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1401,7 +1401,7 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ nls = netlink_kernel_create(NULL, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
+ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_14_sync_attribute_container.c_from_ofed1.3.patch b/kernel_patches/backport/2.6.9_U4/iser_14_sync_attribute_container.c_from_ofed1.3.patch
new file mode 100644
index 0000000..e926007
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_14_sync_attribute_container.c_from_ofed1.3.patch
@@ -0,0 +1,394 @@
+From bed65721f623039a119b5ff03c6c1fe44a1ccfb3 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:26:20 +0300
+Subject: [PATCH] sync attribute_container.c from ofed1.3
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/base/attribute_container.c | 100 +++++++++++++++++------------------
+ drivers/base/transport_class.c | 21 ++++----
+ 2 files changed, 60 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index f57652d..7370d7c 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -27,21 +27,21 @@
+ struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+- struct device classdev;
++ struct class_device classdev;
+ };
+
+ static void internal_container_klist_get(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- get_device(&ic->classdev);
++ class_device_get(&ic->classdev);
+ }
+
+ static void internal_container_klist_put(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- put_device(&ic->classdev);
++ class_device_put(&ic->classdev);
+ }
+
+
+@@ -53,7 +53,7 @@ static void internal_container_klist_put(struct klist_node *n)
+ * Returns the container associated with this classdev.
+ */
+ struct attribute_container *
+-attribute_container_classdev_to_container(struct device *classdev)
++attribute_container_classdev_to_container(struct class_device *classdev)
+ {
+ struct internal_container *ic =
+ container_of(classdev, struct internal_container, classdev);
+@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct device *classdev)
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+-static LIST_HEAD(attribute_container_list);
++static struct list_head attribute_container_list;
+
+ static DEFINE_MUTEX(attribute_container_mutex);
+
+@@ -110,11 +110,11 @@ attribute_container_unregister(struct attribute_container *cont)
+ EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+ /* private function used as class release */
+-static void attribute_container_release(struct device *classdev)
++static void attribute_container_release(struct class_device *classdev)
+ {
+ struct internal_container *ic
+ = container_of(classdev, struct internal_container, classdev);
+- struct device *dev = classdev->parent;
++ struct device *dev = classdev->dev;
+
+ kfree(ic);
+ put_device(dev);
+@@ -129,12 +129,12 @@ static void attribute_container_release(struct device *classdev)
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container). If no
+ * fn is provided, the code will simply register the class device via
+- * device_add. If a function is provided, it is expected to add
++ * class_device_add. If a function is provided, it is expected to add
+ * the class device at the appropriate time. One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it a later time. To do this, call this routine for
+ * allocation and initialisation and then use
+- * attribute_container_device_trigger() to call device_add() on
++ * attribute_container_device_trigger() to call class_device_add() on
+ * it. Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+@@ -142,7 +142,7 @@ void
+ attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -163,11 +163,11 @@ attribute_container_add_device(struct device *dev,
+ }
+
+ ic->cont = cont;
+- device_initialize(&ic->classdev);
+- ic->classdev.parent = get_device(dev);
++ class_device_initialize(&ic->classdev);
++ ic->classdev.dev = get_device(dev);
+ ic->classdev.class = cont->class;
+- cont->class->dev_release = attribute_container_release;
+- strcpy(ic->classdev.bus_id, dev->bus_id);
++ cont->class->release = attribute_container_release;
++ strcpy(ic->classdev.class_id, dev->bus_id);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+@@ -195,19 +195,20 @@ attribute_container_add_device(struct device *dev,
+ * @fn: A function to call to remove the device
+ *
+ * This routine triggers device removal. If fn is NULL, then it is
+- * simply done via device_unregister (note that if something
++ * simply done via class_device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released). If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+- * device_del() and then use attribute_container_device_trigger()
+- * to do the final put on the classdev.
++ * class_device_del() and then use
++ * attribute_container_device_trigger() to do the final put on the
++ * classdev.
+ */
+ void
+ attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -223,14 +224,14 @@ attribute_container_remove_device(struct device *dev,
+ continue;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev != ic->classdev.parent)
++ if (dev != ic->classdev.dev)
+ continue;
+ klist_del(&ic->node);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else {
+ attribute_container_remove_attrs(&ic->classdev);
+- device_unregister(&ic->classdev);
++ class_device_unregister(&ic->classdev);
+ }
+ }
+ }
+@@ -251,7 +252,7 @@ void
+ attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -269,7 +270,7 @@ attribute_container_device_trigger(struct device *dev,
+ }
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev == ic->classdev.parent)
++ if (dev == ic->classdev.dev)
+ fn(cont, dev, &ic->classdev);
+ }
+ }
+@@ -312,23 +313,18 @@ attribute_container_trigger(struct device *dev,
+ * attributes listed in the container
+ */
+ int
+-attribute_container_add_attrs(struct device *classdev)
++attribute_container_add_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i, error;
+
+- BUG_ON(attrs && cont->grp);
+-
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return 0;
+
+- if (cont->grp)
+- return sysfs_create_group(&classdev->kobj, cont->grp);
+-
+ for (i = 0; attrs[i]; i++) {
+- error = device_create_file(classdev, attrs[i]);
++ error = class_device_create_file(classdev, attrs[i]);
+ if (error)
+ return error;
+ }
+@@ -337,18 +333,18 @@ attribute_container_add_attrs(struct device *classdev)
+ }
+
+ /**
+- * attribute_container_add_class_device - same function as device_add
++ * attribute_container_add_class_device - same function as class_device_add
+ *
+ * @classdev: the class device to add
+ *
+- * This performs essentially the same function as device_add except for
++ * This performs essentially the same function as class_device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+ int
+-attribute_container_add_class_device(struct device *classdev)
++attribute_container_add_class_device(struct class_device *classdev)
+ {
+- int error = device_add(classdev);
++ int error = class_device_add(classdev);
+ if (error)
+ return error;
+ return attribute_container_add_attrs(classdev);
+@@ -363,7 +359,7 @@ attribute_container_add_class_device(struct device *classdev)
+ int
+ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ return attribute_container_add_class_device(classdev);
+ }
+@@ -375,23 +371,18 @@ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ *
+ */
+ void
+-attribute_container_remove_attrs(struct device *classdev)
++attribute_container_remove_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i;
+
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return;
+
+- if (cont->grp) {
+- sysfs_remove_group(&classdev->kobj, cont->grp);
+- return ;
+- }
+-
+ for (i = 0; attrs[i]; i++)
+- device_remove_file(classdev, attrs[i]);
++ class_device_remove_file(classdev, attrs[i]);
+ }
+
+ /**
+@@ -400,13 +391,13 @@ attribute_container_remove_attrs(struct device *classdev)
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+- * device_del.
++ * class_device_del.
+ */
+ void
+-attribute_container_class_device_del(struct device *classdev)
++attribute_container_class_device_del(struct class_device *classdev)
+ {
+ attribute_container_remove_attrs(classdev);
+- device_del(classdev);
++ class_device_del(classdev);
+ }
+
+ /**
+@@ -418,16 +409,16 @@ attribute_container_class_device_del(struct device *classdev)
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+-struct device *
++struct class_device *
+ attribute_container_find_class_device(struct attribute_container *cont,
+ struct device *dev)
+ {
+- struct device *cdev = NULL;
++ struct class_device *cdev = NULL;
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (ic->classdev.parent == dev) {
++ if (ic->classdev.dev == dev) {
+ cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
+@@ -438,3 +429,10 @@ attribute_container_find_class_device(struct attribute_container *cont,
+ return cdev;
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
++
++int __init
++attribute_container_init(void)
++{
++ INIT_LIST_HEAD(&attribute_container_list);
++ return 0;
++}
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
+index 84997ef..f25e7c6 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+ static int anon_transport_dummy_function(struct transport_container *tc,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ /* do nothing */
+ return 0;
+@@ -108,14 +108,13 @@ EXPORT_SYMBOL_GPL(anon_transport_class_register);
+ */
+ void anon_transport_class_unregister(struct anon_transport_class *atc)
+ {
+- if (unlikely(attribute_container_unregister(&atc->container)))
+- BUG();
++ attribute_container_unregister(&atc->container);
+ }
+ EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+ static int transport_setup_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -127,7 +126,9 @@ static int transport_setup_classdev(struct attribute_container *cont,
+ }
+
+ /**
+- * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
++ * transport_setup_device - declare a new dev for transport class association
++ * but don't make it visible yet.
++ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+@@ -149,7 +150,7 @@ EXPORT_SYMBOL_GPL(transport_setup_device);
+
+ static int transport_add_class_device(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ int error = attribute_container_add_class_device(classdev);
+ struct transport_container *tcont =
+@@ -181,7 +182,7 @@ EXPORT_SYMBOL_GPL(transport_add_device);
+
+ static int transport_configure(struct attribute_container *cont,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(transport_configure_device);
+
+ static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+@@ -251,12 +252,12 @@ EXPORT_SYMBOL_GPL(transport_remove_device);
+
+ static void transport_destroy_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove != anon_transport_dummy_function)
+- put_device(classdev);
++ class_device_put(classdev);
+ }
+
+
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U4/iser_15_fix_iscsi_free_mgmt_task.patch b/kernel_patches/backport/2.6.9_U4/iser_15_fix_iscsi_free_mgmt_task.patch
new file mode 100644
index 0000000..7a3a3ea
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U4/iser_15_fix_iscsi_free_mgmt_task.patch
@@ -0,0 +1,28 @@
+From 5a9fd2300982aca58f1306bdb98cab878998a607 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:53:59 +0300
+Subject: [PATCH] fix iscsi_free_mgmt_task
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iser_initiator.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 4e20c8b..e7f2399 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -627,7 +627,9 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&conn->session->lock);
+- iscsi_free_mgmt_task(conn, mtask);
++ list_del(&mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ spin_unlock(&session->lock);
+ }
+ }
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch b/kernel_patches/backport/2.6.9_U5/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
new file mode 100644
index 0000000..e35b289
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
@@ -0,0 +1,9402 @@
+From f75042cdafb7f42cd1f9a244872ae2f7896e3278 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Wed, 20 Aug 2008 14:32:54 +0300
+Subject: [PATCH 1/1] iscsi_01_sync_kernel_code_with_ofed_1_2_5
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 2537 +++++++++++++++++++----------------
+ drivers/scsi/iscsi_tcp.h | 136 ++-
+ drivers/scsi/libiscsi.c | 2501 ++++++++++++----------------------
+ drivers/scsi/scsi_transport_iscsi.c | 1208 +++++------------
+ include/scsi/iscsi_if.h | 119 +--
+ include/scsi/iscsi_proto.h | 23 +-
+ include/scsi/libiscsi.h | 247 ++---
+ include/scsi/scsi_transport_iscsi.h | 148 +--
+ 8 files changed, 2862 insertions(+), 4057 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..c9a3abf 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -29,15 +29,14 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/inet.h>
+-#include <linux/file.h>
+ #include <linux/blkdev.h>
+ #include <linux/crypto.h>
+ #include <linux/delay.h>
+ #include <linux/kfifo.h>
+ #include <linux/scatterlist.h>
++#include <linux/mutex.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+-#include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_transport_iscsi.h>
+@@ -48,7 +47,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus at yahoo.com>, "
+ "Alex Aizman <itn780 at yahoo.com>");
+ MODULE_DESCRIPTION("iSCSI/TCP data-path");
+ MODULE_LICENSE("GPL");
+-#undef DEBUG_TCP
++/* #define DEBUG_TCP */
+ #define DEBUG_ASSERT
+
+ #ifdef DEBUG_TCP
+@@ -64,515 +63,200 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment);
+-
+-/*
+- * Scatterlist handling: inside the iscsi_segment, we
+- * remember an index into the scatterlist, and set data/size
+- * to the current scatterlist entry. For highmem pages, we
+- * kmap as needed.
+- *
+- * Note that the page is unmapped when we return from
+- * TCP's data_ready handler, so we may end up mapping and
+- * unmapping the same page repeatedly. The whole reason
+- * for this is that we shouldn't keep the page mapped
+- * outside the softirq.
+- */
+-
+-/**
+- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+- * @segment: the buffer object
+- * @sg: scatterlist
+- * @offset: byte offset into that sg entry
+- *
+- * This function sets up the segment so that subsequent
+- * data is copied to the indicated sg entry, at the given
+- * offset.
+- */
+ static inline void
+-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg, unsigned int offset)
++iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+ {
+- segment->sg = sg;
+- segment->sg_offset = offset;
+- segment->size = min(sg->length - offset,
+- segment->total_size - segment->total_copied);
+- segment->data = NULL;
++ ibuf->sg.page = virt_to_page(vbuf);
++ ibuf->sg.offset = offset_in_page(vbuf);
++ ibuf->sg.length = size;
++ ibuf->sent = 0;
++ ibuf->use_sendmsg = 1;
+ }
+
+-/**
+- * iscsi_tcp_segment_map - map the current S/G page
+- * @segment: iscsi_segment
+- * @recv: 1 if called from recv path
+- *
+- * We only need to possibly kmap data if scatter lists are being used,
+- * because the iscsi passthrough and internal IO paths will never use high
+- * mem pages.
+- */
+ static inline void
+-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
++iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+ {
+- struct scatterlist *sg;
+-
+- if (segment->data != NULL || !segment->sg)
+- return;
+-
+- sg = segment->sg;
+- BUG_ON(segment->sg_mapped);
+- BUG_ON(sg->length == 0);
+-
++ ibuf->sg.page = sg->page;
++ ibuf->sg.offset = sg->offset;
++ ibuf->sg.length = sg->length;
+ /*
+- * If the page count is greater than one it is ok to send
+- * to the network layer's zero copy send path. If not we
+- * have to go the slow sendmsg path. We always map for the
+- * recv path.
++ * Fastpath: sg element fits into single page
+ */
+- if (page_count(sg_page(sg)) >= 1 && !recv)
+- return;
+-
+- debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+- segment);
+- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+- segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
++ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
++ ibuf->use_sendmsg = 0;
++ else
++ ibuf->use_sendmsg = 1;
++ ibuf->sent = 0;
+ }
+
+-static inline void
+-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
++static inline int
++iscsi_buf_left(struct iscsi_buf *ibuf)
+ {
+- debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
++ int rc;
+
+- if (segment->sg_mapped) {
+- debug_tcp("iscsi_tcp_segment_unmap valid\n");
+- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+- segment->sg_mapped = NULL;
+- segment->data = NULL;
+- }
++ rc = ibuf->sg.length - ibuf->sent;
++ BUG_ON(rc < 0);
++ return rc;
+ }
+
+-/*
+- * Splice the digest buffer into the buffer
+- */
+ static inline void
+-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
++iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ u8* crc)
+ {
+- segment->data = digest;
+- segment->digest_len = ISCSI_DIGEST_SIZE;
+- segment->total_size += ISCSI_DIGEST_SIZE;
+- segment->size = ISCSI_DIGEST_SIZE;
+- segment->copied = 0;
+- segment->sg = NULL;
+- segment->hash = NULL;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++
++ crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
++ buf->sg.length = tcp_conn->hdr_size;
+ }
+
+-/**
+- * iscsi_tcp_segment_done - check whether the segment is complete
+- * @segment: iscsi segment to check
+- * @recv: set to one of this is called from the recv path
+- * @copied: number of bytes copied
+- *
+- * Check if we're done receiving this segment. If the receive
+- * buffer is full but we expect more data, move on to the
+- * next entry in the scatterlist.
+- *
+- * If the amount of data we received isn't a multiple of 4,
+- * we will transparently receive the pad bytes, too.
+- *
+- * This function must be re-entrant.
+- */
+ static inline int
+-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
++iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
+ {
+- static unsigned char padbuf[ISCSI_PAD_LEN];
+- struct scatterlist sg;
+- unsigned int pad;
++ struct sk_buff *skb = tcp_conn->in.skb;
++
++ tcp_conn->in.zero_copy_hdr = 0;
+
+- debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+- segment->size, recv ? "recv" : "xmit");
+- if (segment->hash && copied) {
++ if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
++ tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
+ /*
+- * If a segment is kmapd we must unmap it before sending
+- * to the crypto layer since that will try to kmap it again.
++ * Zero-copy PDU Header: using connection context
++ * to store header pointer.
+ */
+- iscsi_tcp_segment_unmap(segment);
+-
+- if (!segment->data) {
+- sg_init_table(&sg, 1);
+- sg_set_page(&sg, sg_page(segment->sg), copied,
+- segment->copied + segment->sg_offset +
+- segment->sg->offset);
+- } else
+- sg_init_one(&sg, segment->data + segment->copied,
+- copied);
+- crypto_hash_update(segment->hash, &sg, copied);
+- }
+-
+- segment->copied += copied;
+- if (segment->copied < segment->size) {
+- iscsi_tcp_segment_map(segment, recv);
+- return 0;
+- }
+-
+- segment->total_copied += segment->copied;
+- segment->copied = 0;
+- segment->size = 0;
+-
+- /* Unmap the current scatterlist page, if there is one. */
+- iscsi_tcp_segment_unmap(segment);
+-
+- /* Do we have more scatterlist entries? */
+- debug_tcp("total copied %u total size %u\n", segment->total_copied,
+- segment->total_size);
+- if (segment->total_copied < segment->total_size) {
+- /* Proceed to the next entry in the scatterlist. */
+- iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+- 0);
+- iscsi_tcp_segment_map(segment, recv);
+- BUG_ON(segment->size == 0);
+- return 0;
+- }
+-
+- /* Do we need to handle padding? */
+- pad = iscsi_padding(segment->total_copied);
+- if (pad != 0) {
+- debug_tcp("consume %d pad bytes\n", pad);
+- segment->total_size += pad;
+- segment->size = pad;
+- segment->data = padbuf;
+- return 0;
+- }
+-
+- /*
+- * Set us up for transferring the data digest. hdr digest
+- * is completely handled in hdr done function.
+- */
+- if (segment->hash) {
+- crypto_hash_final(segment->hash, segment->digest);
+- iscsi_tcp_segment_splice_digest(segment,
+- recv ? segment->recv_digest : segment->digest);
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-/**
+- * iscsi_tcp_xmit_segment - transmit segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to transmnit
+- *
+- * This function transmits as much of the buffer as
+- * the network layer will accept, and returns the number of
+- * bytes transmitted.
+- *
+- * If CRC hashing is enabled, the function will compute the
+- * hash as it goes. When the entire segment has been transmitted,
+- * it will retrieve the hash value and send it as well.
+- */
+-static int
+-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct socket *sk = tcp_conn->sock;
+- unsigned int copied = 0;
+- int r = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 0, r)) {
+- struct scatterlist *sg;
+- unsigned int offset, copy;
+- int flags = 0;
+-
+- r = 0;
+- offset = segment->copied;
+- copy = segment->size - offset;
+-
+- if (segment->total_copied + segment->size < segment->total_size)
+- flags |= MSG_MORE;
+-
+- /* Use sendpage if we can; else fall back to sendmsg */
+- if (!segment->data) {
+- sg = segment->sg;
+- offset += segment->sg_offset + sg->offset;
+- r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+- flags);
++ if (skb_shinfo(skb)->frag_list == NULL &&
++ !skb_shinfo(skb)->nr_frags) {
++ tcp_conn->in.hdr = (struct iscsi_hdr *)
++ ((char*)skb->data + tcp_conn->in.offset);
++ tcp_conn->in.zero_copy_hdr = 1;
+ } else {
+- struct msghdr msg = { .msg_flags = flags };
+- struct kvec iov = {
+- .iov_base = segment->data + offset,
+- .iov_len = copy
+- };
+-
+- r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
++ /* ignoring return code since we checked
++ * in.copy before */
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ &tcp_conn->hdr, tcp_conn->hdr_size);
++ tcp_conn->in.hdr = &tcp_conn->hdr;
+ }
++ tcp_conn->in.offset += tcp_conn->hdr_size;
++ tcp_conn->in.copy -= tcp_conn->hdr_size;
++ } else {
++ int hdr_remains;
++ int copylen;
+
+- if (r < 0) {
+- iscsi_tcp_segment_unmap(segment);
+- if (copied || r == -EAGAIN)
+- break;
+- return r;
+- }
+- copied += r;
+- }
+- return copied;
+-}
+-
+-/**
+- * iscsi_tcp_segment_recv - copy data to segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to copy to
+- * @ptr: data pointer
+- * @len: amount of data available
+- *
+- * This function copies up to @len bytes to the
+- * given buffer, and returns the number of bytes
+- * consumed, which can actually be less than @len.
+- *
+- * If hash digest is enabled, the function will update the
+- * hash while copying.
+- * Combining these two operations doesn't buy us a lot (yet),
+- * but in the future we could implement combined copy+crc,
+- * just way we do for network layer checksums.
+- */
+-static int
+-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment, const void *ptr,
+- unsigned int len)
+-{
+- unsigned int copy = 0, copied = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+- if (copied == len) {
+- debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+- len);
+- break;
+- }
+-
+- copy = min(len - copied, segment->size - segment->copied);
+- debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+- memcpy(segment->data + segment->copied, ptr + copied, copy);
+- copied += copy;
+- }
+- return copied;
+-}
+-
+-static inline void
+-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+- unsigned char digest[ISCSI_DIGEST_SIZE])
+-{
+- struct scatterlist sg;
+-
+- sg_init_one(&sg, hdr, hdrlen);
+- crypto_hash_digest(hash, &sg, hdrlen, digest);
+-}
+-
+-static inline int
+-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- if (!segment->digest_len)
+- return 1;
+-
+- if (memcmp(segment->recv_digest, segment->digest,
+- segment->digest_len)) {
+- debug_scsi("digest mismatch\n");
+- return 0;
+- }
++ /*
++ * PDU header scattered across SKB's,
++ * copying it... This'll happen quite rarely.
++ */
+
+- return 1;
+-}
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
++ tcp_conn->in.hdr_offset = 0;
+
+-/*
+- * Helper function to set up segment buffer
+- */
+-static inline void
+-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- memset(segment, 0, sizeof(*segment));
+- segment->total_size = size;
+- segment->done = done;
++ hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
++ BUG_ON(hdr_remains <= 0);
+
+- if (hash) {
+- segment->hash = hash;
+- crypto_hash_init(hash);
+- }
+-}
++ copylen = min(tcp_conn->in.copy, hdr_remains);
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
++ copylen);
+
+-static inline void
+-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+- size_t size, iscsi_segment_done_fn_t *done,
+- struct hash_desc *hash)
+-{
+- __iscsi_segment_init(segment, size, done, hash);
+- segment->data = data;
+- segment->size = size;
+-}
++ debug_tcp("PDU gather offset %d bytes %d in.offset %d "
++ "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
++ tcp_conn->in.offset, tcp_conn->in.copy);
+
+-static inline int
+-iscsi_segment_seek_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg_list, unsigned int sg_count,
+- unsigned int offset, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- struct scatterlist *sg;
+- unsigned int i;
+-
+- debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+- offset, size);
+- __iscsi_segment_init(segment, size, done, hash);
+- for_each_sg(sg_list, sg, sg_count, i) {
+- debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+- sg->offset);
+- if (offset < sg->length) {
+- iscsi_tcp_segment_init_sg(segment, sg, offset);
+- return 0;
++ tcp_conn->in.offset += copylen;
++ tcp_conn->in.copy -= copylen;
++ if (copylen < hdr_remains) {
++ tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
++ tcp_conn->in.hdr_offset += copylen;
++ return -EAGAIN;
+ }
+- offset -= sg->length;
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->discontiguous_hdr_cnt++;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+
+- return ISCSI_ERR_DATA_OFFSET;
+-}
+-
+-/**
+- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+- * @tcp_conn: iscsi connection to prep for
+- *
+- * This function always passes NULL for the hash argument, because when this
+- * function is called we do not yet know the final size of the header and want
+- * to delay the digest processing until we know that.
+- */
+-static void
+-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+- tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+- iscsi_tcp_hdr_recv_done, NULL);
+-}
+-
+-/*
+- * Handle incoming reply to any other type of command
+- */
+-static int
+-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- int rc = 0;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+- conn->data, tcp_conn->in.datalen);
+- if (rc)
+- return rc;
+-
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-static void
+-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct hash_desc *rx_hash = NULL;
+-
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- conn->data, tcp_conn->in.datalen,
+- iscsi_tcp_data_recv_done, rx_hash);
+-}
+-
+ /*
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
++ struct scsi_cmnd *sc;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
+- tcp_task->r2t = NULL;
+- }
++ sc = ctask->sc;
++ if (unlikely(!sc))
++ return;
++
++ tcp_ctask->xmstate = XMSTATE_IDLE;
++ tcp_ctask->r2t = NULL;
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
++ int rc;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
++ /*
++ * setup Data-In byte counter (gets decremented..)
++ */
++ ctask->data_count = tcp_conn->in.datalen;
++
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (ctask->datasn != datasn)
+ return ISCSI_ERR_DATASN;
+- }
+
+- tcp_task->exp_datasn++;
++ ctask->datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+- debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ return ISCSI_ERR_DATA_OFFSET;
+- }
+
+ if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ struct scsi_cmnd *sc = ctask->sc;
++
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+- ISCSI_FLAG_DATA_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
+- else
++ res_count <= sc->request_bufflen) {
++ sc->resid = res_count;
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ }
+
+ conn->datain_pdus_cnt++;
+@@ -582,7 +266,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,10 +276,11 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -603,8 +288,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -619,57 +304,94 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ conn->dataout_pdus_cnt++;
+
+ r2t->sent = 0;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (sc->use_sg) {
++ int i, sg_count = 0;
++ struct scatterlist *sg = sc->request_buffer;
++
++ r2t->sg = NULL;
++ for (i = 0; i < sc->use_sg; i++, sg += 1) {
++ /* FIXME: prefetch ? */
++ if (sg_count + sg->length > r2t->data_offset) {
++ int page_offset;
++
++ /* sg page found! */
++
++ /* offset within this page */
++ page_offset = r2t->data_offset - sg_count;
++
++ /* fill in this buffer */
++ iscsi_buf_init_sg(&r2t->sendbuf, sg);
++ r2t->sendbuf.sg.offset += page_offset;
++ r2t->sendbuf.sg.length -= page_offset;
++
++ /* xmit logic will continue with next one */
++ r2t->sg = sg + 1;
++ break;
++ }
++ sg_count += sg->length;
++ }
++ BUG_ON(r2t->sg == NULL);
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + r2t->data_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
+ }
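The scatterlist walk in iscsi_solicit_data_init() above seeks to the entry that contains r2t->data_offset and then trims that entry's offset and length. A minimal user-space sketch of the same seek, assuming plain length-only segments rather than real struct scatterlist entries (seg, seek_segment and the sample lengths below are invented for illustration, not driver code):

#include <stdio.h>

/* Hypothetical stand-in for a scatterlist entry: only a length. */
struct seg { unsigned int length; };

/*
 * Find the segment containing byte 'data_offset' and the offset within
 * that segment, mirroring the walk over sc->request_buffer above.
 */
static int seek_segment(const struct seg *sg, int nseg,
                        unsigned int data_offset, unsigned int *seg_offset)
{
        unsigned int sg_count = 0;
        int i;

        for (i = 0; i < nseg; i++) {
                if (sg_count + sg[i].length > data_offset) {
                        *seg_offset = data_offset - sg_count;
                        return i;               /* segment found */
                }
                sg_count += sg[i].length;
        }
        return -1;                              /* offset beyond the buffer */
}

int main(void)
{
        struct seg sg[] = { { 4096 }, { 4096 }, { 2048 } };
        unsigned int off = 0;
        int idx = seek_segment(sg, 3, 5000, &off);

        printf("segment %d, offset %u\n", idx, off);  /* segment 1, offset 904 */
        return 0;
}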
+
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2t with datalen %d\n",
+- tcp_conn->in.datalen);
++ printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
++ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ return ISCSI_ERR_R2TSN;
+- }
+
+- /* fill-in new R2T associated with the task */
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+- iscsi_conn_printk(KERN_INFO, conn,
+- "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ /* FIXME: use R2TSN to detect missing R2T */
++
++ /* fill-in new R2T associated with the task */
++ spin_lock(&session->lock);
++ if (!ctask->sc || ctask->mtask ||
++ session->state != ISCSI_STATE_LOGGED_IN) {
++ printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
++ "recovery...\n", ctask->itt);
++ spin_unlock(&session->lock);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = be32_to_cpu(rhdr->data_length);
+ if (r2t->data_length == 0) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
++ spin_unlock(&session->lock);
+ return ISCSI_ERR_DATALEN;
+ }
+
+@@ -679,159 +401,122 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with data len %u at offset %u "
+- "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ if (r2t->data_offset + r2t->data_length > ctask->total_length) {
++ spin_unlock(&session->lock);
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
++ "offset %u and total length %d\n", r2t->data_length,
++ r2t->data_offset, ctask->total_length);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+- conn->r2t_pdus_cnt++;
++ tcp_ctask->exp_r2tsn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ list_move_tail(&ctask->running, &conn->xmitqueue);
+
+- iscsi_requeue_task(task);
+- return 0;
+-}
+-
+-/*
+- * Handle incoming reply to DataIn command
+- */
+-static int
+-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+- int rc;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- /* check for non-exceptional status */
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+- if (rc)
+- return rc;
+- }
++ scsi_queue_work(session->host, &conn->xmitwork);
++ conn->r2t_pdus_cnt++;
++ spin_unlock(&session->lock);
+
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-/**
+- * iscsi_tcp_hdr_dissect - process PDU header
+- * @conn: iSCSI connection
+- * @hdr: PDU header
+- *
+- * This function analyzes the header of the PDU received,
+- * and performs several sanity checks. If the PDU is accompanied
+- * by data, the receive buffer is set up to copy the incoming data
+- * to the correct location.
+- */
+ static int
+-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
++iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_hdr *hdr;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ uint32_t cdgst, rdgst = 0, itt;
++
++ hdr = tcp_conn->in.hdr;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: datalen %d > %d\n",
+- tcp_conn->in.datalen, conn->max_recv_dlength);
++ printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
++ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
++ tcp_conn->data_copied = 0;
+
+- /* Additional header segments. So far, we don't
+- * process additional headers.
+- */
++ /* read AHS */
+ ahslen = hdr->hlength << 2;
++ tcp_conn->in.offset += ahslen;
++ tcp_conn->in.copy -= ahslen;
++ if (tcp_conn->in.copy < 0) {
++ printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
++ "%d bytes\n", ahslen);
++ return ISCSI_ERR_AHSLEN;
++ }
++
++ /* calculate read padding */
++ tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
++ if (tcp_conn->in.padding) {
++ tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
++ debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
++ }
++
++ if (conn->hdrdgst_en) {
++ struct scatterlist sg;
++
++ sg_init_one(&sg, (u8 *)hdr,
++ sizeof(struct iscsi_hdr) + ahslen);
++ crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
++ (u8 *)&cdgst);
++ rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
++ ahslen);
++ if (cdgst != rdgst) {
++ printk(KERN_ERR "iscsi_tcp: hdrdgst error "
++ "recv 0x%x calc 0x%x\n", rdgst, cdgst);
++ return ISCSI_ERR_HDR_DGST;
++ }
++ }
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
++ rc = iscsi_verify_itt(conn, hdr, &itt);
++ if (rc == ISCSI_ERR_NO_SCSI_CMD) {
++ tcp_conn->in.datalen = 0; /* force drop */
++ return 0;
++ } else if (rc)
+ return rc;
+
+- debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+- opcode, ahslen, tcp_conn->in.datalen);
++ debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
++ opcode, tcp_conn->in.offset, tcp_conn->in.copy,
++ ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
+- if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+-
+- /*
+- * Setup copy of Data-In into the Scsi_Cmnd
+- * Scatterlist case:
+- * We set up the iscsi_segment to point to the next
+- * scatterlist entry to copy to. As we go along,
+- * we move on to the next scatterlist entry and
+- * update the digest per-entry.
+- */
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+- "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
++ tcp_conn->in.ctask = session->cmds[itt];
++ rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
++ if (rc)
+ return rc;
+- }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
+- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
++
++ spin_lock(&session->lock);
++ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
++ spin_unlock(&session->lock);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
++ else if (tcp_conn->in.ctask->sc->sc_data_direction ==
++ DMA_TO_DEVICE)
++ rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
+ else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -842,24 +527,18 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+- if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: received buffer of "
+- "len %u but conn buffer is only %u "
+- "(opcode %0x)\n",
+- tcp_conn->in.datalen,
+- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
++ if (ISCSI_DEF_MAX_RECV_SEG_LEN <
++ tcp_conn->in.datalen) {
++ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
++ "but conn buffer is only %u (opcode %0x)\n",
++ tcp_conn->in.datalen,
++ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+- /* If there's data coming in with the response,
+- * receive it to the connection's buffer.
+- */
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+@@ -871,161 +550,457 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ break;
+ }
+
+- if (rc == 0) {
+- /* Anything that comes with data should have
+- * been handled above. */
+- if (tcp_conn->in.datalen)
+- return ISCSI_ERR_PROTO;
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ return rc;
++
++copy_hdr:
++ /*
++ * if we did zero copy for the header but we will need multiple
++ * skbs to complete the command then we have to copy the header
++ * for later use
++ */
++ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
++ (tcp_conn->in.datalen + tcp_conn->in.padding +
++ (conn->datadgst_en ? 4 : 0))) {
++ debug_tcp("Copying header for later use. in.copy %d in.datalen"
++ " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
++ memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->in.zero_copy_hdr = 0;
+ }
++ return 0;
++}
+
+- return rc;
++/**
++ * iscsi_ctask_copy - copy skb bits to the destination cmd task
++ * @conn: iscsi tcp connection
++ * @ctask: scsi command task
++ * @buf: buffer to copy to
++ * @buf_size: size of buffer
++ * @offset: offset within the buffer
++ *
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection and
++ * per-cmd byte counters.
++ *
++ * Read counters (in bytes):
++ *
++ * conn->in.offset offset within in progress SKB
++ * conn->in.copy left to copy from in progress SKB
++ * including padding
++ * conn->in.copied copied already from in progress SKB
++ * conn->data_copied copied already from in progress buffer
++ * ctask->sent total bytes sent up to the MidLayer
++ * ctask->data_count left to copy from in progress Data-In
++ * buf_left left to copy from in progress buffer
++ **/
++static inline int
++iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
++ void *buf, int buf_size, int offset)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int buf_left = buf_size - (tcp_conn->data_copied + offset);
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ size = min(size, ctask->data_count);
++
++ debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->in.copied);
++
++ BUG_ON(size <= 0);
++ BUG_ON(tcp_ctask->sent + size > ctask->total_length);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)buf + (offset + tcp_conn->data_copied), size);
++ /* must fit into skb->len */
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++ tcp_ctask->sent += size;
++ ctask->data_count -= size;
++
++ BUG_ON(tcp_conn->in.copy < 0);
++ BUG_ON(ctask->data_count < 0);
++
++ if (buf_size != (tcp_conn->data_copied + offset)) {
++ if (!ctask->data_count) {
++ BUG_ON(buf_size - tcp_conn->data_copied < 0);
++ /* done with this PDU */
++ return buf_size - tcp_conn->data_copied;
++ }
++ return -EAGAIN;
++ }
++
++ /* done with this buffer or with both - PDU and buffer */
++ tcp_conn->data_copied = 0;
++ return 0;
+ }
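The counter bookkeeping in iscsi_ctask_copy() above is easiest to follow with concrete numbers. The sketch below keeps only that arithmetic; rx_state, copy_step and min3 are invented names, -1 stands in for -EAGAIN, and nothing here is driver code, only the same decision logic:

#include <stdio.h>

/* Simplified mirror of the counters iscsi_ctask_copy() juggles. */
struct rx_state {
        int in_copy;        /* bytes left in the current skb           */
        int data_copied;    /* bytes already copied into this buffer   */
        int data_count;     /* bytes left in the current Data-In PDU   */
};

static int min3(int a, int b, int c)
{
        int m = a < b ? a : b;
        return m < c ? m : c;
}

/*
 * Returns 0 when the destination buffer is full, a positive leftover
 * count when the Data-In PDU ended before the buffer filled, and -1
 * (think -EAGAIN) when the skb ran out before either of those happened.
 */
static int copy_step(struct rx_state *st, int buf_size, int offset)
{
        int buf_left = buf_size - (st->data_copied + offset);
        int size = min3(st->in_copy, buf_left, st->data_count);

        st->in_copy     -= size;
        st->data_copied += size;
        st->data_count  -= size;

        if (buf_size != st->data_copied + offset) {
                if (!st->data_count)
                        return buf_size - st->data_copied;  /* PDU done */
                return -1;                                  /* need next skb */
        }
        st->data_copied = 0;                                /* buffer done */
        return 0;
}

int main(void)
{
        struct rx_state st = { .in_copy = 1000, .data_count = 512 };
        int rc = copy_step(&st, 4096, 0);

        printf("rc=%d, skb bytes left=%d\n", rc, st.in_copy);  /* rc=3584, 488 */
        return 0;
}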
+
+ /**
+- * iscsi_tcp_hdr_recv_done - process PDU header
++ * iscsi_tcp_copy - copy skb bits to the destination buffer
++ * @conn: iscsi tcp connection
+ *
+- * This is the callback invoked when the PDU header has
+- * been received. If the header is followed by additional
+- * header segments, we go back for more data.
+- */
+-static int
+-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection
++ * byte counters.
++ **/
++static inline int
++iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
+ {
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int buf_left = buf_size - tcp_conn->data_copied;
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->data_copied);
++ BUG_ON(size <= 0);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)conn->data + tcp_conn->data_copied, size);
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++
++ if (buf_size != tcp_conn->data_copied)
++ return -EAGAIN;
++
++ return 0;
++}
++
++static inline void
++partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
++ int offset, int length)
++{
++ struct scatterlist temp;
+
+- /* Check if there are additional header segments
+- * *prior* to computing the digest, because we
+- * may need to go back to the caller for more.
++ memcpy(&temp, sg, sizeof(struct scatterlist));
++ temp.offset = offset;
++ temp.length = length;
++ crypto_hash_update(desc, &temp, length);
++}
++
++static void
++iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
++{
++ struct scatterlist tmp;
++
++ sg_init_one(&tmp, buf, len);
++ crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
++}
++
++static int iscsi_scsi_data_in(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
++ struct scatterlist *sg;
++ int i, offset, rc = 0;
++
++ BUG_ON((void*)ctask != sc->SCp.ptr);
++
++ /*
++ * copying Data-In into the Scsi_Cmnd
+ */
+- hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+- if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+- /* Bump the header length - the caller will
+- * just loop around and get the AHS for us, and
+- * call again. */
+- unsigned int ahslen = hdr->hlength << 2;
+-
+- /* Make sure we don't overflow */
+- if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+- return ISCSI_ERR_AHSLEN;
+-
+- segment->total_size += ahslen;
+- segment->size += ahslen;
+- return 0;
++ if (!sc->use_sg) {
++ i = ctask->data_count;
++ rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
++ sc->request_bufflen,
++ tcp_ctask->data_offset);
++ if (rc == -EAGAIN)
++ return rc;
++ if (conn->datadgst_en)
++ iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
++ i);
++ rc = 0;
++ goto done;
+ }
+
+- /* We're done processing the header. See if we're doing
+- * header digests; if so, set up the recv_digest buffer
+- * and go back for more. */
+- if (conn->hdrdgst_en) {
+- if (segment->digest_len == 0) {
+- iscsi_tcp_segment_splice_digest(segment,
+- segment->recv_digest);
+- return 0;
++ offset = tcp_ctask->data_offset;
++ sg = sc->request_buffer;
++
++ if (tcp_ctask->data_offset)
++ for (i = 0; i < tcp_ctask->sg_count; i++)
++ offset -= sg[i].length;
++ /* we've passed through a partial sg */
++ if (offset < 0)
++ offset = 0;
++
++ for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
++ char *dest;
++
++ dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
++ rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
++ sg[i].length, offset);
++ kunmap_atomic(dest, KM_SOFTIRQ0);
++ if (rc == -EAGAIN)
++ /* continue with the next SKB/PDU */
++ return rc;
++ if (!rc) {
++ if (conn->datadgst_en) {
++ if (!offset)
++ crypto_hash_update(
++ &tcp_conn->rx_hash,
++ &sg[i], sg[i].length);
++ else
++ partial_sg_digest_update(
++ &tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset + offset,
++ sg[i].length - offset);
++ }
++ offset = 0;
++ tcp_ctask->sg_count++;
+ }
+- iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+- segment->total_copied - ISCSI_DIGEST_SIZE,
+- segment->digest);
+
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_HDR_DGST;
++ if (!ctask->data_count) {
++ if (rc && conn->datadgst_en)
++ /*
++ * data-in is complete, but buffer not...
++ */
++ partial_sg_digest_update(&tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset,
++ sg[i].length-rc);
++ rc = 0;
++ break;
++ }
++
++ if (!tcp_conn->in.copy)
++ return -EAGAIN;
+ }
++ BUG_ON(ctask->data_count);
+
+- tcp_conn->in.hdr = hdr;
+- return iscsi_tcp_hdr_dissect(conn, hdr);
++done:
++ /* check for non-exceptional status */
++ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
++ (long)sc, sc->result, ctask->itt,
++ tcp_conn->in.hdr->flags);
++ spin_lock(&conn->session->lock);
++ __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
++ spin_unlock(&conn->session->lock);
++ }
++
++ return rc;
++}
++
++static int
++iscsi_data_recv(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc = 0, opcode;
++
++ opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
++ switch (opcode) {
++ case ISCSI_OP_SCSI_DATA_IN:
++ rc = iscsi_scsi_data_in(conn);
++ break;
++ case ISCSI_OP_SCSI_CMD_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_ASYNC_EVENT:
++ case ISCSI_OP_REJECT:
++ /*
++ * Collect data segment to the connection's data
++ * placeholder
++ */
++ if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
++ rc = -EAGAIN;
++ goto exit;
++ }
++
++ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
++ tcp_conn->in.datalen);
++ if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
++ iscsi_recv_digest_update(tcp_conn, conn->data,
++ tcp_conn->in.datalen);
++ break;
++ default:
++ BUG_ON(1);
++ }
++exit:
++ return rc;
+ }
+
+ /**
+- * iscsi_tcp_recv - TCP receive in sendfile fashion
++ * iscsi_tcp_data_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ **/
+ static int
+-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+- unsigned int offset, size_t len)
++iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
++ unsigned int offset, size_t len)
+ {
++ int rc;
+ struct iscsi_conn *conn = rd_desc->arg.data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->in.segment;
+- struct skb_seq_state seq;
+- unsigned int consumed = 0;
+- int rc = 0;
++ int processed;
++ char pad[ISCSI_PAD_LEN];
++ struct scatterlist sg;
+
+- debug_tcp("in %d bytes\n", skb->len - offset);
++ /*
++ * Save current SKB and its offset in the corresponding
++ * connection context.
++ */
++ tcp_conn->in.copy = skb->len - offset;
++ tcp_conn->in.offset = offset;
++ tcp_conn->in.skb = skb;
++ tcp_conn->in.len = tcp_conn->in.copy;
++ BUG_ON(tcp_conn->in.copy <= 0);
++ debug_tcp("in %d bytes\n", tcp_conn->in.copy);
++
++more:
++ tcp_conn->in.copied = 0;
++ rc = 0;
+
+ if (unlikely(conn->suspend_rx)) {
+ debug_tcp("conn %d Rx suspended!\n", conn->id);
+ return 0;
+ }
+
+- skb_prepare_seq_read(skb, offset, skb->len, &seq);
+- while (1) {
+- unsigned int avail;
+- const u8 *ptr;
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
++ tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
++ rc = iscsi_hdr_extract(tcp_conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto nomore;
++ else {
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ }
+
+- avail = skb_seq_read(consumed, &ptr, &seq);
+- if (avail == 0) {
+- debug_tcp("no more data avail. Consumed %d\n",
+- consumed);
+- break;
++ /*
++ * Verify and process incoming PDU header.
++ */
++ rc = iscsi_tcp_hdr_recv(conn);
++ if (!rc && tcp_conn->in.datalen) {
++ if (conn->datadgst_en)
++ crypto_hash_init(&tcp_conn->rx_hash);
++ tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
++ } else if (rc) {
++ iscsi_conn_failure(conn, rc);
++ return 0;
+ }
+- BUG_ON(segment->copied >= segment->size);
+-
+- debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+- rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+- BUG_ON(rc == 0);
+- consumed += rc;
+-
+- if (segment->total_copied >= segment->total_size) {
+- debug_tcp("segment done\n");
+- rc = segment->done(tcp_conn, segment);
+- if (rc != 0) {
+- skb_abort_seq_read(&seq);
+- goto error;
+- }
++ }
++
++ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
++ uint32_t recv_digest;
+
+- /* The done() functions sets up the
+- * next segment. */
++ debug_tcp("extra data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++ rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++
++ memcpy(&recv_digest, conn->data, sizeof(uint32_t));
++ if (recv_digest != tcp_conn->in.datadgst) {
++ debug_tcp("iscsi_tcp: data digest error!"
++ "0x%x != 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
++ return 0;
++ } else {
++ debug_tcp("iscsi_tcp: data digest match!"
++ "0x%x == 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+ }
+- skb_abort_seq_read(&seq);
+- conn->rxdata_octets += consumed;
+- return consumed;
+
+-error:
+- debug_tcp("Error receiving PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return 0;
++ if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
++ tcp_conn->in.copy) {
++
++ debug_tcp("data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++
++ rc = iscsi_data_recv(conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ tcp_conn->in.copy -= tcp_conn->in.padding;
++ tcp_conn->in.offset += tcp_conn->in.padding;
++ if (conn->datadgst_en) {
++ if (tcp_conn->in.padding) {
++ debug_tcp("padding -> %d\n",
++ tcp_conn->in.padding);
++ memset(pad, 0, tcp_conn->in.padding);
++ sg_init_one(&sg, pad, tcp_conn->in.padding);
++ crypto_hash_update(&tcp_conn->rx_hash,
++ &sg, sg.length);
++ }
++ crypto_hash_final(&tcp_conn->rx_hash,
++ (u8 *) &tcp_conn->in.datadgst);
++ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
++ tcp_conn->data_copied = 0;
++ } else
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ }
++
++ debug_tcp("f, processed %d from out of %d padding %d\n",
++ tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
++ BUG_ON(tcp_conn->in.offset - offset > len);
++
++ if (tcp_conn->in.offset - offset != len) {
++ debug_tcp("continue to process %d bytes\n",
++ (int)len - (tcp_conn->in.offset - offset));
++ goto more;
++ }
++
++nomore:
++ processed = tcp_conn->in.offset - offset;
++ BUG_ON(processed == 0);
++ return processed;
++
++again:
++ processed = tcp_conn->in.offset - offset;
++ debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
++ processed, (int)len, (int)rd_desc->count);
++ BUG_ON(processed == 0);
++ BUG_ON(processed > len);
++
++ conn->rxdata_octets += processed;
++ return processed;
+ }
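iscsi_tcp_data_recv() above resumes a partially received PDU across skbs by keeping per-connection state (IN_PROGRESS_WAIT_HEADER, IN_PROGRESS_DATA_RECV, IN_PROGRESS_DDIGEST_RECV). The toy below shows the same resumable header/payload split for an invented wire format with an 8-byte header whose first byte is the payload length; rx, rx_feed and that format are assumptions for illustration, not the iSCSI PDU layout:

#include <stdio.h>
#include <string.h>

enum rx_state { WAIT_HEADER, DATA_RECV };

struct rx {
        enum rx_state state;
        unsigned char hdr[8];
        size_t hdr_filled;
        size_t data_left;
};

/* Feed one received chunk ("skb") into the state machine. */
static void rx_feed(struct rx *rx, const unsigned char *buf, size_t len)
{
        while (len) {
                if (rx->state == WAIT_HEADER) {
                        size_t n = sizeof(rx->hdr) - rx->hdr_filled;

                        if (n > len)
                                n = len;
                        memcpy(rx->hdr + rx->hdr_filled, buf, n);
                        rx->hdr_filled += n;
                        buf += n;
                        len -= n;
                        if (rx->hdr_filled == sizeof(rx->hdr)) {
                                rx->data_left = rx->hdr[0]; /* toy dlength */
                                rx->hdr_filled = 0;
                                rx->state = DATA_RECV;
                        }
                } else {
                        size_t n = rx->data_left < len ? rx->data_left : len;

                        buf += n;          /* real code copies/digests here */
                        len -= n;
                        rx->data_left -= n;
                        if (!rx->data_left) {
                                printf("PDU complete\n");
                                rx->state = WAIT_HEADER;
                        }
                }
        }
}

int main(void)
{
        unsigned char pdu[12] = { 4 };     /* 8-byte header + 4 payload bytes */
        struct rx rx = { WAIT_HEADER };

        rx_feed(&rx, pdu, 5);              /* header split across two chunks */
        rx_feed(&rx, pdu + 5, 7);
        return 0;
}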
+
+ static void
+ iscsi_tcp_data_ready(struct sock *sk, int flag)
+ {
+ struct iscsi_conn *conn = sk->sk_user_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+
+ /*
+- * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
++ * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+ * We set count to 1 because we want the network layer to
+- * hand us all the skbs that are available. iscsi_tcp_recv
++ * hand us all the skbs that are available. iscsi_tcp_data_recv
+ * handled pdus that cross buffers or pdus that still need data.
+ */
+ rd_desc.arg.data = conn;
+ rd_desc.count = 1;
+- tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
++ tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+
+ read_unlock(&sk->sk_callback_lock);
+-
+- /* If we had to (atomically) map a highmem page,
+- * unmap it now. */
+- iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ }
+
+ static void
+@@ -1105,179 +1080,127 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+ }
+
+ /**
+- * iscsi_xmit - TCP transmit
+- **/
+-static int
+-iscsi_xmit(struct iscsi_conn *conn)
++ * iscsi_send - generic send routine
++ * @sk: kernel's socket
++ * @buf: buffer to write from
++ * @size: actual size to write
++ * @flags: socket's flags
++ */
++static inline int
++iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+- unsigned int consumed = 0;
+- int rc = 0;
+-
+- while (1) {
+- rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- if (rc == 0)
+- break;
+-
+- consumed += rc;
++ struct socket *sk = tcp_conn->sock;
++ int offset = buf->sg.offset + buf->sent, res;
+
+- if (segment->total_copied >= segment->total_size) {
+- if (segment->done != NULL) {
+- rc = segment->done(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- }
+- }
++ /*
++ * if we got use_sg=0 or are sending something we kmallocd
++ * then we did not have to do kmap (kmap returns page_address)
++ *
++ * if we got use_sg > 0, but had to drop down, we do not
++ * set clustering so this should only happen for that
++ * slab case.
++ */
++ if (buf->use_sendmsg)
++ res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
++ else
++ res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
++
++ if (res >= 0) {
++ conn->txdata_octets += res;
++ buf->sent += res;
++ return res;
+ }
+
+- debug_tcp("xmit %d bytes\n", consumed);
+-
+- conn->txdata_octets += consumed;
+- return consumed;
+-
+-error:
+- /* Transmit error. We could initiate error recovery
+- * here. */
+- debug_tcp("Error sending PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return rc;
++ tcp_conn->sendpage_failures_cnt++;
++ if (res == -EAGAIN)
++ res = -ENOBUFS;
++ else
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return res;
+ }
+
+ /**
+- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+- */
+-static inline int
+-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+-
+- return segment->total_copied - segment->total_size;
+-}
+-
++ * iscsi_sendhdr - send PDU Header via tcp_sendpage()
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @datalen: length of data to be sent after the header
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
+ static inline int
+-iscsi_tcp_flush(struct iscsi_conn *conn)
++iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+ {
+- int rc;
+-
+- while (iscsi_tcp_xmit_qlen(conn)) {
+- rc = iscsi_xmit(conn);
+- if (rc == 0)
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (buf->sent + size != buf->sg.length || datalen)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
++ if (res >= 0) {
++ if (size != res)
+ return -EAGAIN;
+- if (rc < 0)
+- return rc;
++ return 0;
+ }
+
+- return 0;
+-}
+-
+-/*
+- * This is called when we're done sending the header.
+- * Simply copy the data_segment to the send segment, and return.
+- */
+-static int
+-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- tcp_conn->out.segment = tcp_conn->out.data_segment;
+- debug_tcp("Header done. Next segment size %u total_size %u\n",
+- tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+- return 0;
++ return res;
+ }
+
+-static void
+-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
++/**
++ * iscsi_sendpage - send one page of iSCSI Data-Out.
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @count: remaining data
++ * @sent: number of bytes sent
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
++static inline int
++iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ int *count, int *sent)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
+- conn->hdrdgst_en? ", digest enabled" : "");
+-
+- /* Clear the data segment - needs to be filled in by the
+- * caller using iscsi_tcp_send_data_prep() */
+- memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+-
+- /* If header digest is enabled, compute the CRC and
+- * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
+- * sufficient room.
+- */
+- if (conn->hdrdgst_en) {
+- iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+- hdr + hdrlen);
+- hdrlen += ISCSI_DIGEST_SIZE;
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (size > *count)
++ size = *count;
++ if (buf->sent + size != buf->sg.length || *count != size)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
++ size, buf->sent, *count, *sent, res);
++ if (res >= 0) {
++ *count -= res;
++ *sent += res;
++ if (size != res)
++ return -EAGAIN;
++ return 0;
+ }
+
+- /* Remember header pointer for later, when we need
+- * to decide whether there's a payload to go along
+- * with the header. */
+- tcp_conn->out.hdr = hdr;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+- iscsi_tcp_send_hdr_done, NULL);
++ return res;
+ }
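Both iscsi_sendhdr() and iscsi_sendpage() above follow one pattern: set MSG_MORE while more bytes will follow, account for the bytes actually accepted by the socket, and report -EAGAIN on a short send so the xmit state machine can resume later. A user-space sketch of that pattern, assuming Linux and a throwaway socketpair; send_with_resume is an invented helper, not the driver API:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

/*
 * Send buf starting at *sent, updating *sent; returns 0 when everything
 * went out, -EAGAIN on a short or would-block send (caller retries).
 */
static int send_with_resume(int fd, const char *buf, size_t len,
                            size_t *sent, int more_data_follows)
{
        int flags = MSG_DONTWAIT | (more_data_follows ? MSG_MORE : 0);
        ssize_t n = send(fd, buf + *sent, len - *sent, flags);

        if (n < 0)
                return errno == EAGAIN ? -EAGAIN : -1;
        *sent += (size_t)n;
        return *sent == len ? 0 : -EAGAIN;
}

int main(void)
{
        int sv[2];
        size_t sent = 0;
        const char hdr[48] = "pdu header";  /* pretend 48-byte BHS */

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
                return 1;
        while (send_with_resume(sv[0], hdr, sizeof(hdr), &sent, 1) == -EAGAIN)
                ;                  /* real code waits for the socket instead */
        printf("sent %zu bytes\n", sent);
        return 0;
}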
+
+-/*
+- * Prepare the send buffer for the payload data.
+- * Padding and checksumming will all be taken care
+- * of by the iscsi_segment routines.
+- */
+-static int
+-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+- unsigned int count, unsigned int offset,
+- unsigned int len)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
+- tcp_conn, offset, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+- sg, count, offset, len,
+- NULL, tx_hash);
+-}
+-
+-static void
+-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+- size_t len)
++static inline void
++iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+- data, len, NULL, tx_hash);
++ crypto_hash_init(&tcp_conn->tx_hash);
++ tcp_ctask->digest_count = 4;
+ }
+
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1287,17 +1210,13 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ *
+ * Called under connection lock.
+ **/
+-static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+- struct iscsi_r2t_info *r2t)
++static void
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_r2t_info *r2t, int left)
+ {
+ struct iscsi_data *hdr;
+- int new_offset, left;
+-
+- BUG_ON(r2t->data_length - r2t->sent < 0);
+- left = r2t->data_length - r2t->sent;
+- if (left == 0)
+- return 0;
++ struct scsi_cmnd *sc = ctask->sc;
++ int new_offset;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -1305,8 +1224,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1318,177 +1237,514 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ r2t->data_count = left;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+-
+ conn->dataout_pdus_cnt++;
+- return 1;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (iscsi_buf_left(&r2t->sendbuf))
++ return;
++
++ if (sc->use_sg) {
++ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
++ r2t->sg += 1;
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + new_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
++}
++
++static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
++ unsigned long len)
++{
++ tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
++ if (!tcp_ctask->pad_count)
++ return;
++
++ tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
++ debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
++ tcp_ctask->xmstate |= XMSTATE_W_PAD;
+ }
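iSCSI data segments are padded out to a 4-byte boundary; iscsi_set_padding() above and the read-side calculation in iscsi_tcp_hdr_recv() both take the remainder modulo ISCSI_PAD_LEN and, when it is non-zero, pad with ISCSI_PAD_LEN minus that remainder. A tiny standalone check of the arithmetic (iscsi_padding is a local helper written for this example, not a kernel function):

#include <stdio.h>

#define ISCSI_PAD_LEN 4     /* data segments are padded to 4-byte multiples */

static unsigned int iscsi_padding(unsigned long len)
{
        unsigned int pad = len & (ISCSI_PAD_LEN - 1);

        return pad ? ISCSI_PAD_LEN - pad : 0;
}

int main(void)
{
        unsigned long lens[] = { 0, 1, 3, 4, 510, 512 };

        for (int i = 0; i < 6; i++)
                printf("len %lu -> pad %u\n", lens[i], iscsi_padding(lens[i]));
        /* pads printed: 0, 3, 1, 0, 2, 0 */
        return 0;
}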
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
++static void
++iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
++{
++ struct scsi_cmnd *sc = ctask->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++
++ tcp_ctask->sent = 0;
++ tcp_ctask->sg_count = 0;
++
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ tcp_ctask->xmstate = XMSTATE_W_HDR;
++ tcp_ctask->exp_r2tsn = 0;
++ BUG_ON(ctask->total_length == 0);
++
++ if (sc->use_sg) {
++ struct scatterlist *sg = sc->request_buffer;
++
++ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
++ tcp_ctask->sg = sg + 1;
++ tcp_ctask->bad_sg = sg + sc->use_sg;
++ } else {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf,
++ sc->request_buffer,
++ sc->request_bufflen);
++ tcp_ctask->sg = NULL;
++ tcp_ctask->bad_sg = NULL;
++ }
++ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
++ "unsol count %d, unsol offset %d]\n",
++ ctask->itt, ctask->total_length, ctask->imm_count,
++ ctask->unsol_count, ctask->unsol_offset);
++ } else
++ tcp_ctask->xmstate = XMSTATE_R_HDR;
++
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
++ sizeof(struct iscsi_hdr));
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ *
++ * Management xmit state machine consists of two states:
++ * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
++ * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
++ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
+- int err;
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++ int rc;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
++ debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
++ conn->id, tcp_mtask->xmstate, mtask->itt);
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
++ conn->stop_stage != STOP_CONN_RECOVER &&
++ conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
++ (u8*)tcp_mtask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
++ mtask->data_count);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ return rc;
++ }
++ }
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
++ BUG_ON(!mtask->data_count);
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ /* FIXME: implement.
++ * Virtual buffer could be spread across multiple pages...
+ */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
++ do {
++ int rc;
++
++ rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
++ &mtask->data_count, &tcp_mtask->sent);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ return rc;
++ }
++ } while (mtask->data_count);
++ }
+
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
++ BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
+
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
++ spin_lock_bh(&session->lock);
++ list_del(&conn->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
++ sizeof(void*));
++ spin_unlock_bh(&session->lock);
++ }
++ return 0;
++}
++
++static inline int
++iscsi_send_read_hdr(struct iscsi_conn *conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
++{
++ int rc;
++
++ tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
++ if (!rc) {
++ BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
++ return 0; /* wait for Data-In */
+ }
++ tcp_ctask->xmstate |= XMSTATE_R_HDR;
++ return rc;
++}
+
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++static inline int
++iscsi_send_write_hdr(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
++ if (rc) {
++ tcp_ctask->xmstate |= XMSTATE_W_HDR;
++ return rc;
++ }
+
+- if (!task->imm_count)
+- return 0;
++ if (ctask->imm_count) {
++ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
++ iscsi_set_padding(tcp_ctask, ctask->imm_count);
+
+- /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
+- if (err)
+- return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ if (ctask->conn->datadgst_en) {
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ tcp_ctask->immdigest = 0;
++ }
++ }
++
++ if (ctask->unsol_count)
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ return 0;
+ }
+
+-/*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
+- *
+- * We're expected to return 0 when everything was transmitted succesfully,
+- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+- * of error.
+- */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
+- int rc = 0;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int sent = 0, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
++ tcp_ctask->pad_count);
++ if (conn->datadgst_en)
++ crypto_hash_update(&tcp_conn->tx_hash,
++ &tcp_ctask->sendbuf.sg,
++ tcp_ctask->sendbuf.sg.length);
++ } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
++ return 0;
+
+-flush:
+- /* Flush any pending data first. */
+- rc = iscsi_tcp_flush(conn);
+- if (rc < 0)
+- return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
++ debug_scsi("sending %d pad bytes for itt 0x%x\n",
++ tcp_ctask->pad_count, ctask->itt);
++ rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
++ &sent);
++ if (rc) {
++ debug_scsi("padding send failed %d\n", rc);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
++ }
++ return rc;
++}
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++static int
++iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_buf *buf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask;
++ struct iscsi_tcp_conn *tcp_conn;
++ int rc, sent = 0;
++
++ if (!conn->datadgst_en)
+ return 0;
++
++ tcp_ctask = ctask->dd_data;
++ tcp_conn = conn->dd_data;
++
++ if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
++ crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
++ iscsi_buf_init_iov(buf, (char*)digest, 4);
+ }
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
++
++ rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
++ if (!rc)
++ debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
++ ctask->itt);
++ else {
++ debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
++ *digest, ctask->itt);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
++ }
++ return rc;
++}
+
+- /* Are we done already? */
+- if (sc->sc_data_direction != DMA_TO_DEVICE)
+- return 0;
++static int
++iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
++ struct scatterlist **sg, int *sent, int *count,
++ struct iscsi_buf *digestbuf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc, buf_sent, offset;
++
++ while (*count) {
++ buf_sent = 0;
++ offset = sendbuf->sent;
++
++ rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
++ *sent = *sent + buf_sent;
++ if (buf_sent && conn->datadgst_en)
++ partial_sg_digest_update(&tcp_conn->tx_hash,
++ &sendbuf->sg, sendbuf->sg.offset + offset,
++ buf_sent);
++ if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
++ iscsi_buf_init_sg(sendbuf, *sg);
++ *sg = *sg + 1;
++ }
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (rc)
++ return rc;
++ }
+
+- /* Prepare a header for the unsolicited PDU.
+- * The amount of data we want to send will be
+- * in task->data_count.
+- * FIXME: return the data count instead.
+- */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ rc = iscsi_send_padding(conn, ctask);
++ if (rc)
++ return rc;
++
++ return iscsi_send_digest(conn, ctask, digestbuf, digest);
++}
++
++static int
++iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_data_task *dtask;
++ int rc;
++
++ tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
++ if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
++ dtask = &tcp_ctask->unsol_dtask;
++
++ iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
++ sizeof(struct iscsi_hdr));
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)dtask->hdrext);
++
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
++ iscsi_set_padding(tcp_ctask, ctask->data_count);
++ }
++
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
++ return rc;
++ }
++
++ if (conn->datadgst_en) {
++ dtask = &tcp_ctask->unsol_dtask;
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
++ }
++
++ debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
++ ctask->itt, ctask->unsol_count, tcp_ctask->sent);
++ return 0;
++}
+
+- debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++static int
++iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
++ BUG_ON(!ctask->unsol_count);
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
++send_hdr:
++ rc = iscsi_send_unsol_hdr(conn, ctask);
+ if (rc)
+- goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
+- goto flush;
+- } else {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_r2t_info *r2t;
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
++ struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
++ int start = tcp_ctask->sent;
+
+- /* All unsolicited PDUs sent. Check for solicited PDUs.
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->data_count,
++ &dtask->digestbuf, &dtask->digest);
++ ctask->unsol_count -= tcp_ctask->sent - start;
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ /*
++ * Done with the Data-Out. Next, check if we need
++ * to send another unsolicited Data-Out.
+ */
+- spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
+- debug_scsi(" done with r2t %p\n", r2t);
+-
+- __kfifo_put(tcp_task->r2tpool.queue,
+- (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
+- }
++ if (ctask->unsol_count) {
++ debug_scsi("sending more uns\n");
++ tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
++ goto send_hdr;
+ }
++ }
++ return 0;
++}
+
+- if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_r2t_info *r2t;
++ struct iscsi_data_task *dtask;
++ int left, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ if (!tcp_ctask->r2t) {
++ spin_lock_bh(&session->lock);
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ spin_unlock_bh(&session->lock);
++ }
++send_hdr:
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
++
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &r2t->headbuf,
++ (u8*)dtask->hdrext);
++ rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ return rc;
+ }
+- spin_unlock_bh(&session->lock);
+
+- /* Waiting for more R2Ts to arrive. */
+- if (r2t == NULL) {
+- debug_tcp("no R2Ts yet\n");
+- return 0;
++ if (conn->datadgst_en) {
++ iscsi_data_digest_init(conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
+ }
+
+- debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
+- r2t->data_offset + r2t->sent, r2t->data_count);
++ iscsi_set_padding(tcp_ctask, r2t->data_count);
++ debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
++ r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
++ r2t->sent);
++ }
+
+- iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+- sizeof(struct iscsi_hdr));
++ if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
+- r2t->data_offset + r2t->sent,
+- r2t->data_count);
++ rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
++ &r2t->sent, &r2t->data_count,
++ &dtask->digestbuf, &dtask->digest);
+ if (rc)
+- goto fail;
+- tcp_task->sent += r2t->data_count;
+- r2t->sent += r2t->data_count;
+- goto flush;
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++
++ /*
++ * Done with this Data-Out. Next, check if we have
++ * to send another Data-Out for this R2T.
++ */
++ BUG_ON(r2t->data_length - r2t->sent < 0);
++ left = r2t->data_length - r2t->sent;
++ if (left) {
++ iscsi_solicit_data_cont(conn, ctask, r2t, left);
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ goto send_hdr;
++ }
++
++ /*
++ * Done with this R2T. Check if there are more
++ * outstanding R2Ts ready to be processed.
++ */
++ spin_lock_bh(&session->lock);
++ tcp_ctask->r2t = NULL;
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
++ sizeof(void*));
++ if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
++ sizeof(void*))) {
++ tcp_ctask->r2t = r2t;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ spin_unlock_bh(&session->lock);
++ goto send_hdr;
++ }
++ spin_unlock_bh(&session->lock);
+ }
+ return 0;
+-fail:
+- iscsi_conn_failure(conn, rc);
+- return -EIO;
++}
++
++static int
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc = 0;
++
++ debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
++ conn->id, tcp_ctask->xmstate, ctask->itt);
++
++ /*
++ * serialize with TMF AbortTask
++ */
++ if (ctask->mtask)
++ return rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_R_HDR)
++ return iscsi_send_read_hdr(conn, tcp_ctask);
++
++ if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
++ rc = iscsi_send_write_hdr(conn, ctask);
++ if (rc)
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->imm_count,
++ &tcp_ctask->immbuf, &tcp_ctask->immdigest);
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
++ }
++
++ rc = iscsi_send_unsol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ rc = iscsi_send_sol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ return rc;
+ }
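iscsi_tcp_ctask_xmit() above is driven by the xmstate bit flags: each step clears its flag before attempting the send and sets it back when the send returns -EAGAIN, so the next call from the xmit worker resumes exactly at the step that stalled. A compact sketch of that clear-then-restore pattern; send_step, xmit and the forced short first send are invented purely to show the control flow:

#include <errno.h>
#include <stdio.h>

#define XMSTATE_W_HDR    0x1
#define XMSTATE_IMM_DATA 0x2

static int attempt;                     /* force the first send to stall */

static int send_step(const char *what)
{
        printf("sending %s\n", what);
        return attempt++ == 0 ? -EAGAIN : 0;
}

static int xmit(unsigned int *xmstate)
{
        int rc;

        if (*xmstate & XMSTATE_W_HDR) {
                *xmstate &= ~XMSTATE_W_HDR;
                rc = send_step("header");
                if (rc) {
                        *xmstate |= XMSTATE_W_HDR;      /* resume here later */
                        return rc;
                }
        }
        if (*xmstate & XMSTATE_IMM_DATA) {
                *xmstate &= ~XMSTATE_IMM_DATA;
                rc = send_step("immediate data");
                if (rc) {
                        *xmstate |= XMSTATE_IMM_DATA;
                        return rc;
                }
        }
        return 0;
}

int main(void)
{
        unsigned int xmstate = XMSTATE_W_HDR | XMSTATE_IMM_DATA;

        while (xmit(&xmstate))
                ;       /* the driver re-enters from the xmit workqueue */
        return 0;
}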
+
+ static struct iscsi_cls_conn *
+@@ -1498,7 +1754,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,31 +1764,45 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ /* initial operational parameters */
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ if (IS_ERR(tcp_conn->tx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->tx_hash.tfm));
++ goto free_tcp_conn;
++ }
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->rx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->rx_hash.tfm))
++ if (IS_ERR(tcp_conn->rx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->rx_hash.tfm));
+ goto free_tx_tfm;
++ }
+
+ return cls_conn;
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Could not create connection due to crc32c "
+- "loading error. Make sure the crc32c "
+- "module is built as a module or into the "
+- "kernel\n");
++free_tcp_conn:
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1540,21 +1810,18 @@ free_conn:
+ static void
+ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct socket *sock = tcp_conn->sock;
+
+- if (!sock)
++ if (!tcp_conn->sock)
+ return;
+
+- sock_hold(sock->sk);
++ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+- sock_put(sock->sk);
++ sock_put(tcp_conn->sock->sk);
+
+- spin_lock_bh(&session->lock);
++ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+- spin_unlock_bh(&session->lock);
+- sockfd_put(sock);
++ conn->recv_lock = NULL;
+ }
+
+ static void
+@@ -1564,13 +1831,14 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+@@ -1579,60 +1847,9 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+-
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+-}
+-
+-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+- char *buf, int *port,
+- int (*getname)(struct socket *, struct sockaddr *,
+- int *addrlen))
+-{
+- struct sockaddr_storage *addr;
+- struct sockaddr_in6 *sin6;
+- struct sockaddr_in *sin;
+- int rc = 0, len;
+-
+- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+- if (!addr)
+- return -ENOMEM;
+-
+- if (getname(sock, (struct sockaddr *) addr, &len)) {
+- rc = -ENODEV;
+- goto free_addr;
+- }
+-
+- switch (addr->ss_family) {
+- case AF_INET:
+- sin = (struct sockaddr_in *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+- *port = be16_to_cpu(sin->sin_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- case AF_INET6:
+- sin6 = (struct sockaddr_in6 *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+- *port = be16_to_cpu(sin6->sin6_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- }
+-free_addr:
+- kfree(addr);
+- return rc;
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+ }
+
+ static int
+@@ -1640,8 +1857,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1651,28 +1866,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ /* lookup for existing socket */
+ sock = sockfd_lookup((int)transport_eph, &err);
+ if (!sock) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "sockfd_lookup failed %d\n", err);
++ printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
+ return -EEXIST;
+ }
+- /*
+- * copy these values now because if we drop the session
+- * userspace may still want to query the values since we will
+- * be using them for the reconnect
+- */
+- err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+- &conn->portal_port, kernel_getpeername);
+- if (err)
+- goto free_socket;
+-
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
+- if (err)
+- goto free_socket;
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+- goto free_socket;
++ return err;
+
+ /* bind iSCSI connection and socket */
+ tcp_conn->sock = sock;
+@@ -1683,17 +1883,38 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+ * set receive state machine into initial state
+ */
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++
+ return 0;
++}
+
+-free_socket:
+- sockfd_put(sock);
+- return err;
++/* called with host lock */
++static void
++iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_mtask->xmstate = XMSTATE_IMM_HDR;
++ tcp_mtask->sent = 0;
++
++ if (mtask->data_count)
++ iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
++ mtask->data_count);
+ }
+
+ static int
+@@ -1706,8 +1927,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1937,18 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
++ (void***)&tcp_ctask->r2ts,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1957,12 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1973,12 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ }
+
+@@ -1769,6 +1994,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ switch(param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
++ if (conn->hdrdgst_en)
++ tcp_conn->hdr_size += sizeof(__u32);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+@@ -1777,12 +2005,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%d", &value);
+- if (value <= 0 || !is_power_of_2(value))
+- return -EINVAL;
+- if (session->max_r2t == value)
++ if (session->max_r2t == roundup_pow_of_two(value))
+ break;
+ iscsi_r2tpool_free(session);
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ if (session->max_r2t & (session->max_r2t - 1))
++ session->max_r2t = roundup_pow_of_two(session->max_r2t);
+ if (iscsi_r2tpool_alloc(session))
+ return -ENOMEM;
+ break;
+@@ -1798,18 +2026,41 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct inet_sock *inet;
++ struct ipv6_pinfo *np;
++ struct sock *sk;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_CONN_PORT:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%hu\n", conn->portal_port);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ inet = inet_sk(tcp_conn->sock->sk);
++ len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%s\n", conn->portal_address);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ sk = tcp_conn->sock->sk;
++ if (sk->sk_family == PF_INET) {
++ inet = inet_sk(sk);
++ len = sprintf(buf, NIPQUAD_FMT "\n",
++ NIPQUAD(inet->daddr));
++ } else {
++ np = inet6_sk(sk);
++ len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
++ }
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+@@ -1843,93 +2094,65 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ ctask->hdr = &tcp_ctask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ mtask->hdr = &tcp_mtask->hdr;
++ }
++
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
+-
+-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+-{
+- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
+- blk_queue_dma_alignment(sdev->request_queue, 0);
+- return 0;
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static struct scsi_host_template iscsi_sht = {
+- .module = THIS_MODULE,
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+- .sg_tablesize = 4096,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
++ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+- .slave_configure = iscsi_tcp_slave_configure,
+ .proc_name = "iscsi_tcp",
+ .this_id = -1,
+ };
+@@ -1956,16 +2179,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+- ISCSI_HOST_INITIATOR_NAME |
+- ISCSI_HOST_NETDEV_NAME,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1978,15 +2197,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_cmd_init,
++ .init_mgmt_task = iscsi_tcp_mgmt_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2217,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
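
A note on the iscsi_tcp_conn_create() hunk above: the backported version allocates the TCP connection context with kzalloc() and unwinds failures through reverse-order goto labels (free_tx_tfm, free_tcp_conn, tcp_conn_alloc_fail), so a failure at any step frees only what was already set up. A minimal standalone sketch of that allocate-then-unwind shape, using only the calls the hunk itself uses (struct my_conn and my_conn_create() are hypothetical names for illustration, not part of the patch):

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/crypto.h>

	struct my_conn {				/* hypothetical context */
		struct hash_desc tx_hash;
		struct hash_desc rx_hash;
	};

	static struct my_conn *my_conn_create(void)
	{
		struct my_conn *c = kzalloc(sizeof(*c), GFP_KERNEL);

		if (!c)
			return NULL;

		c->tx_hash.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(c->tx_hash.tfm))
			goto free_conn;

		c->rx_hash.tfm = crypto_alloc_hash("crc32c", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(c->rx_hash.tfm))
			goto free_tx_tfm;

		return c;

	free_tx_tfm:
		crypto_free_hash(c->tx_hash.tfm);	/* undo step 2 */
	free_conn:
		kfree(c);				/* undo step 1 */
		return NULL;
	}

Each label undoes exactly one successful step, which keeps the error path easy to audit as more allocations are added.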
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..3273683 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -24,61 +24,68 @@
+
+ #include <scsi/libiscsi.h>
+
++/* Socket's Receive state machine */
++#define IN_PROGRESS_WAIT_HEADER 0x0
++#define IN_PROGRESS_HEADER_GATHER 0x1
++#define IN_PROGRESS_DATA_RECV 0x2
++#define IN_PROGRESS_DDIGEST_RECV 0x3
++
++/* xmit state machine */
++#define XMSTATE_IDLE 0x0
++#define XMSTATE_R_HDR 0x1
++#define XMSTATE_W_HDR 0x2
++#define XMSTATE_IMM_HDR 0x4
++#define XMSTATE_IMM_DATA 0x8
++#define XMSTATE_UNS_INIT 0x10
++#define XMSTATE_UNS_HDR 0x20
++#define XMSTATE_UNS_DATA 0x40
++#define XMSTATE_SOL_HDR 0x80
++#define XMSTATE_SOL_DATA 0x100
++#define XMSTATE_W_PAD 0x200
++#define XMSTATE_W_RESEND_PAD 0x400
++#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
++
++#define ISCSI_PAD_LEN 4
++#define ISCSI_SG_TABLESIZE SG_ALL
++#define ISCSI_TCP_MAX_CMD_LEN 16
++
+ struct crypto_hash;
+ struct socket;
+-struct iscsi_tcp_conn;
+-struct iscsi_segment;
+-
+-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+- struct iscsi_segment *);
+-
+-struct iscsi_segment {
+- unsigned char *data;
+- unsigned int size;
+- unsigned int copied;
+- unsigned int total_size;
+- unsigned int total_copied;
+-
+- struct hash_desc *hash;
+- unsigned char recv_digest[ISCSI_DIGEST_SIZE];
+- unsigned char digest[ISCSI_DIGEST_SIZE];
+- unsigned int digest_len;
+-
+- struct scatterlist *sg;
+- void *sg_mapped;
+- unsigned int sg_offset;
+-
+- iscsi_segment_done_fn_t *done;
+-};
+
+ /* Socket connection receive helper */
+ struct iscsi_tcp_recv {
+ struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+-
+- /* Allocate buffer for BHS + AHS */
+- uint32_t hdr_buf[64];
++ struct sk_buff *skb;
++ int offset;
++ int len;
++ int hdr_offset;
++ int copy;
++ int copied;
++ int padding;
++ struct iscsi_cmd_task *ctask; /* current cmd in progress */
+
+ /* copied and flipped values */
+ int datalen;
+-};
+-
+-/* Socket connection send helper */
+-struct iscsi_tcp_send {
+- struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+- struct iscsi_segment data_segment;
++ int datadgst;
++ char zero_copy_hdr;
+ };
+
+ struct iscsi_tcp_conn {
+ struct iscsi_conn *iscsi_conn;
+ struct socket *sock;
++ struct iscsi_hdr hdr; /* header placeholder */
++ char hdrext[4*sizeof(__u16) +
++ sizeof(__u32)];
++ int data_copied;
+ int stop_stage; /* conn_stop() flag: *
+ * stop to recover, *
+ * stop to terminate */
++ /* iSCSI connection-wide sequencing */
++ int hdr_size; /* PDU header size */
++
+ /* control data */
+ struct iscsi_tcp_recv in; /* TCP receive context */
+- struct iscsi_tcp_send out; /* TCP send context */
++ int in_progress; /* connection state machine */
+
+ /* old values for socket callbacks */
+ void (*old_data_ready)(struct sock *, int);
+@@ -93,14 +100,29 @@ struct iscsi_tcp_conn {
+ uint32_t sendpage_failures_cnt;
+ uint32_t discontiguous_hdr_cnt;
+
+- int error;
+-
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ };
+
++struct iscsi_buf {
++ struct scatterlist sg;
++ unsigned int sent;
++ char use_sendmsg;
++};
++
+ struct iscsi_data_task {
+ struct iscsi_data hdr; /* PDU */
+- char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ struct iscsi_buf digestbuf; /* digest buffer */
++ uint32_t digest; /* data digest */
++};
++
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ int xmstate; /* mgmt xmit progress */
++ struct iscsi_buf headbuf; /* header buffer */
++ struct iscsi_buf sendbuf; /* in progress buffer */
++ int sent;
+ };
+
+ struct iscsi_r2t_info {
+@@ -108,26 +130,38 @@ struct iscsi_r2t_info {
+ __be32 exp_statsn; /* copied from R2T */
+ uint32_t data_length; /* copied from R2T */
+ uint32_t data_offset; /* copied from R2T */
++ struct iscsi_buf headbuf; /* Data-Out Header Buffer */
++ struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
+ int sent; /* R2T sequence progress */
+ int data_count; /* DATA-Out payload progress */
++ struct scatterlist *sg; /* per-R2T SG list */
+ int solicit_datasn;
+- struct iscsi_data_task dtask; /* Data-Out header buf */
++ struct iscsi_data_task dtask; /* which data task */
+ };
+
+-struct iscsi_tcp_task {
+- struct iscsi_hdr_buff {
+- struct iscsi_cmd cmd_hdr;
+- char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+- ISCSI_DIGEST_SIZE];
+- } hdr;
+-
++struct iscsi_tcp_cmd_task {
++ struct iscsi_cmd hdr;
++ char hdrext[4*sizeof(__u16)+ /* AHS */
++ sizeof(__u32)]; /* HeaderDigest */
++ char pad[ISCSI_PAD_LEN];
++ int pad_count; /* padded bytes */
++ struct iscsi_buf headbuf; /* header buf (xmit) */
++ struct iscsi_buf sendbuf; /* in progress buffer*/
++	int			xmstate;	/* xmit state machine */
+ int sent;
+- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
++ struct scatterlist *sg; /* per-cmd SG list */
++ struct scatterlist *bad_sg; /* assert statement */
++ int sg_count; /* SG's to process */
++ uint32_t exp_r2tsn;
+ int data_offset;
+- struct iscsi_r2t_info *r2t; /* in progress R2T */
+- struct iscsi_pool r2tpool;
++ struct iscsi_r2t_info *r2t; /* in progress R2T */
++ struct iscsi_queue r2tpool;
+ struct kfifo *r2tqueue;
+- struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
++ struct iscsi_r2t_info **r2ts;
++ int digest_count;
++ uint32_t immdigest; /* for imm data */
++ struct iscsi_buf immbuf; /* for imm data digest */
++ struct iscsi_data_task unsol_dtask; /* unsol data task */
+ };
+
+ #endif /* ISCSI_H */
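
The xmstate field that this header restores is a plain bitmask of the XMSTATE_* flags defined above: the iscsi_tcp.c hunks earlier in the patch test a phase bit to pick the next send step, then clear the finished bit and set the bit for the following phase (for example, setting XMSTATE_SOL_DATA and clearing XMSTATE_SOL_HDR once an R2T has been dequeued). A tiny sketch of that idiom, relying only on the XMSTATE_* defines above (xmstate_advance() is a hypothetical helper, not part of the patch):

	/* Advance a command from the solicited-header phase to the
	 * solicited-data phase, mirroring the inline transitions in
	 * the ctask xmit path earlier in this patch. */
	static void xmstate_advance(int *xmstate)
	{
		if (*xmstate & XMSTATE_SOL_HDR) {	/* header phase pending? */
			*xmstate &= ~XMSTATE_SOL_HDR;	/* header phase done */
			*xmstate |= XMSTATE_SOL_DATA;	/* next: solicited data */
		}
	}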
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..3f5b9b4 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -22,9 +22,9 @@
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+ #include <linux/types.h>
++#include <linux/mutex.h>
+ #include <linux/kfifo.h>
+ #include <linux/delay.h>
+-#include <linux/log2.h>
+ #include <asm/unaligned.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -38,211 +38,92 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-#define SNA32_CHECK 2147483648UL
+-
+-static int iscsi_sna_lt(u32 n1, u32 n2)
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
+ {
+- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
+ }
++EXPORT_SYMBOL_GPL(class_to_transport_session);
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-static int iscsi_sna_lte(u32 n1, u32 n2)
+-{
+- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+-}
++#define INVALID_SN_DELTA 0xffff
+
+-void
+-iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
++int
++iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ {
+ uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
+ uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
+
+- /*
+- * standard specifies this check for when to update expected and
+- * max sequence numbers
+- */
+- if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+- return;
+-
+- if (exp_cmdsn != session->exp_cmdsn &&
+- !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
++ if (max_cmdsn < exp_cmdsn -1 &&
++ max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
++ return ISCSI_ERR_MAX_CMDSN;
++ if (max_cmdsn > session->max_cmdsn ||
++ max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
++ session->max_cmdsn = max_cmdsn;
++ if (exp_cmdsn > session->exp_cmdsn ||
++ exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ session->exp_cmdsn = exp_cmdsn;
+
+- if (max_cmdsn != session->max_cmdsn &&
+- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+- session->max_cmdsn = max_cmdsn;
+- /*
+- * if the window closed with IO queued, then kick the
+- * xmit thread
+- */
+- if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
+- }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
++EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
+-{
+- unsigned exp_len = task->hdr_len + len;
+-
+- if (exp_len > task->hdr_max) {
+- WARN_ON(1);
+- return -EINVAL;
+- }
+-
+- WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
+- return 0;
+-}
+-
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
+- int rc;
+-
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
+- if (rc)
+- return rc;
+- hdr->opcode = ISCSI_OP_SCSI_CMD;
+- hdr->flags = ISCSI_ATTR_SIMPLE;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
+- hdr->cmdsn = cpu_to_be32(session->cmdsn);
+- session->cmdsn++;
+- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
+-
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ hdr->opcode = ISCSI_OP_SCSI_CMD;
++ hdr->flags = ISCSI_ATTR_SIMPLE;
++ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
++ hdr->itt = build_itt(ctask->itt, conn->id, session->age);
++ hdr->data_length = cpu_to_be32(sc->request_bufflen);
++ hdr->cmdsn = cpu_to_be32(session->cmdsn);
++ session->cmdsn++;
++ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
++
++ ctask->data_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,318 +139,117 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->imm_count = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (ctask->total_length >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(ctask->total_length,
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(ctask->hdr->dlength, ctask->imm_count);
+ } else
+- zero_data(hdr->dlength);
++ zero_data(ctask->hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min(session->first_burst,
++ ctask->total_length) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
++ ctask->datasn = 0;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+- /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
+-
+- WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+- hdrlength /= ISCSI_PAD_LEN;
+-
+- WARN_ON(hdrlength >= 256);
+- hdr->hlength = hdrlength & 0xFF;
+-
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
+-
+ conn->scsicmd_pdus_cnt++;
+- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+- return 0;
+ }
++EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
++ struct iscsi_session *session = ctask->conn->session;
++ struct scsi_cmnd *sc = ctask->sc;
+
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+-
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+
+-void __iscsi_get_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- atomic_inc(&task->refcount);
++ atomic_inc(&ctask->refcount);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_get_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+-/*
+- * session lock must be held
+- */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
+- int err)
++static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct scsi_cmnd *sc;
+-
+- sc = task->sc;
+- if (!sc)
+- return;
+-
+- if (task->state == ISCSI_TASK_PENDING)
+- /*
+- * cmd never made it to the xmit thread, so we should not count
+- * the cmd in the sequencing
+- */
+- conn->session->queued_cmdsn--;
+- else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
+-
+- sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_put_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
+-
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
+-}
+-
+-static struct iscsi_task *
+-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
+-
+- if (session->state == ISCSI_STATE_TERMINATE)
+- return NULL;
+-
+- if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
+- hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- /*
+- * Login and Text are sent serially, in
+- * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
+- */
+- task = conn->login_task;
+- else {
+- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+-
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
+- return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+- }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+-
+- if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
+- } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
+-
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
+-}
+-
+-int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_session *session = conn->session;
+- int err = 0;
+-
+- spin_lock_bh(&session->lock);
+- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+- err = -EPERM;
+- spin_unlock_bh(&session->lock);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+-
+ /**
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+ * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
+ * then completes the command and task.
+ **/
+-static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
+- int datalen)
++static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ struct iscsi_cmd_task *ctask, char *data,
++ int datalen)
+ {
++ int rc;
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc) {
++ sc->result = DID_ERROR << 16;
++ goto out;
++ }
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+@@ -584,14 +264,13 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+
+ if (datalen < 2) {
+ invalid_datalen:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Got CHECK_CONDITION but invalid data "
+- "buffer size of %d\n", datalen);
++ printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
++ "invalid data buffer size of %d\n", datalen);
+ sc->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,36 +280,28 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ if (sc->sc_data_direction == DMA_TO_DEVICE)
++ goto out;
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+- ISCSI_FLAG_CMD_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+- if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+- scsi_set_resid(sc, res_count);
++ if (res_count > 0 && res_count <= sc->request_bufflen)
++ sc->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++ else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
++ return rc;
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -640,42 +311,18 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+- if (conn->tmf_state != TMF_QUEUED)
++ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+- conn->tmf_state = TMF_SUCCESS;
++ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+- conn->tmf_state = TMF_NOT_FOUND;
++ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+- conn->tmf_state = TMF_FAILED;
++ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+ }
+
+-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+-{
+- struct iscsi_nopout hdr;
+- struct iscsi_task *task;
+-
+- if (!rhdr && conn->ping_task)
+- return;
+-
+- memset(&hdr, 0, sizeof(struct iscsi_nopout));
+- hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+- hdr.flags = ISCSI_FLAG_CMD_FINAL;
+-
+- if (rhdr) {
+- memcpy(hdr.lun, rhdr->lun, 8);
+- hdr.ttt = rhdr->ttt;
+- hdr.itt = RESERVED_ITT;
+- } else
+- hdr.ttt = RESERVED_ITT;
+-
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
+- iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+-}
+-
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+ {
+@@ -692,41 +339,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+ memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+ itt = get_itt(rejected_pdu.itt);
+- iscsi_conn_printk(KERN_ERR, conn,
+- "itt 0x%x had pdu (op 0x%x) rejected "
+- "due to DataDigest error.\n", itt,
+- rejected_pdu.opcode);
++ printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
++ "due to DataDigest error.\n", itt,
++ rejected_pdu.opcode);
+ }
+ }
+ return 0;
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -742,24 +363,105 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+- conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
++
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
+
+ switch(opcode) {
+ case ISCSI_OP_NOOP_IN:
+@@ -771,7 +473,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ break;
+
+- iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
++ rc = ISCSI_ERR_CONN_FAILED;
+ break;
+ case ISCSI_OP_REJECT:
+ rc = iscsi_handle_reject(conn, hdr, data, datalen);
+@@ -785,101 +488,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
++done:
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+@@ -896,63 +508,55 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x expected "
++ "session age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
++ (conn->id << ISCSI_CID_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x, expected "
++ "CID (%x)\n", (__force u32)hdr->itt, conn->id);
++ return ISCSI_ERR_BAD_ITT;
++ }
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ printk(KERN_INFO "iscsi: dropping ctask with "
++ "itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ printk(KERN_ERR "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,55 +578,29 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
+-static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc, was_logout = 0;
+
+- /*
+- * Check for iSCSI window and take care of CmdSN wrap-around
+- */
+- if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
+- debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
+- "CmdSN %u/%u\n", session->exp_cmdsn,
+- session->max_cmdsn, session->cmdsn,
+- session->queued_cmdsn);
+- return -ENOSPC;
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
++ conn->session->state = ISCSI_STATE_IN_RECOVERY;
++ iscsi_block_session(session_to_cls(conn->session));
++ was_logout = 1;
+ }
+- return 0;
+-}
+-
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
+-{
+- struct iscsi_task *task = conn->task;
+- int rc;
+-
+- __iscsi_get_task(task);
+- spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
+- spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
+- if (!rc)
+- /* done with this task */
+- conn->task = NULL;
+- return rc;
+-}
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ if (rc)
++ return rc;
+
+-/**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
+- *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
+- */
+-void iscsi_requeue_task(struct iscsi_task *task)
+-{
+- struct iscsi_conn *conn = task->conn;
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
+
+- list_move_tail(&task->running, &conn->requeue);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ if (was_logout) {
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ return -ENODATA;
++ }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1035,94 +613,106 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+ **/
+ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ {
++ struct iscsi_transport *tt;
+ int rc = 0;
+
+- spin_lock_bh(&conn->session->lock);
+ if (unlikely(conn->suspend_tx)) {
+ debug_scsi("conn %d Tx suspended!\n", conn->id);
+- spin_unlock_bh(&conn->session->lock);
+ return -ENODATA;
+ }
+-
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
++ tt = conn->session->tt;
+
+ /*
+- * process mgmt pdus like nops before commands since we should
+- * only have one nop-out as a ping from us and targets should not
+- * overflow us with nop-ins
++ * Transmit in the following order:
++ *
++ * 1) un-finished xmit (ctask or mtask)
++ * 2) immediate control PDUs
++ * 3) write data
++ * 4) SCSI commands
++ * 5) non-immediate control PDUs
++ *
++ * No need to lock around __kfifo_get as long as
++ * there's one producer and one consumer.
+ */
+-check_mgmt:
+- while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
+
+- /* process pending command queue */
+- while (!list_empty(&conn->xmitqueue)) {
+- if (conn->tmf_state == TMF_QUEUED)
+- break;
++ BUG_ON(conn->ctask && conn->mtask);
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+- continue;
+- }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ iscsi_get_ctask(conn->ctask);
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++ iscsi_put_ctask(conn->ctask);
+ if (rc)
+ goto again;
+- /*
+- * we could continuously get new task requests so
+- * we need to check the mgmt queue for nops that need to
+- * be sent to aviod starvation
+- */
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ /* done with this in-progress ctask */
++ conn->ctask = NULL;
++ }
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
+ }
+
+- while (!list_empty(&conn->requeue)) {
+- if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
+- break;
++ /* process immediate first */
++ if (unlikely(__kfifo_len(conn->immqueue))) {
++ while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
+
++ /* process command queue */
++ spin_lock_bh(&conn->session->lock);
++ while (!list_empty(&conn->xmitqueue)) {
+ /*
+- * we always do fastlogout - conn stop code will clean up.
++		 * iscsi_tcp may re-add the task to the xmitqueue to send
++		 * write data
+ */
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- break;
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ __iscsi_get_ctask(conn->ctask);
++ spin_unlock_bh(&conn->session->lock);
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
+- if (rc)
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++
++ spin_lock_bh(&conn->session->lock);
++ __iscsi_put_ctask(conn->ctask);
++ if (rc) {
++ spin_unlock_bh(&conn->session->lock);
+ goto again;
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ }
+ }
+ spin_unlock_bh(&conn->session->lock);
++ /* done with this ctask */
++ conn->ctask = NULL;
++
++ /* process the rest control plane PDUs, if any */
++ if (unlikely(__kfifo_len(conn->mgmtqueue))) {
++ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
++
+ return -ENODATA;
+
+ again:
+ if (unlikely(conn->suspend_tx))
+- rc = -ENODATA;
+- spin_unlock_bh(&conn->session->lock);
++ return -ENODATA;
++
+ return rc;
+ }
+
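The numbered comment at the top of iscsi_data_xmit() is the heart of this backported path: one in-progress task is finished first, then immediate control PDUs, then SCSI work, then the remaining control-plane PDUs, with the two control queues kept in kfifos that need no lock as long as there is a single producer and a single consumer. A compressed, userspace-only sketch of that dispatch order (queues reduced to counters, the transport send reduced to a stub; none of these names are kernel APIs):

#include <stdio.h>

/* Illustrative only: a dispatcher that mirrors the priority order
 * documented in iscsi_data_xmit(). Queues are stubbed as counters. */
struct ex_conn {
	int unfinished;     /* a partially sent ctask/mtask */
	int imm_pdus;       /* immediate control PDUs (e.g. nop-out) */
	int scsi_cmds;      /* queued SCSI commands */
	int mgmt_pdus;      /* non-immediate control PDUs */
};

static int ex_send(const char *what, int *counter)
{
	if (*counter <= 0)
		return 0;
	(*counter)--;
	printf("xmit %s (left %d)\n", what, *counter);
	return 1;
}

static void ex_data_xmit(struct ex_conn *c)
{
	/* 1) finish whatever was interrupted last time */
	ex_send("unfinished task", &c->unfinished);
	/* 2) immediate control PDUs go ahead of everything queued */
	while (ex_send("immediate PDU", &c->imm_pdus))
		;
	/* 3/4) write data and SCSI commands */
	while (ex_send("scsi command", &c->scsi_cmds))
		;
	/* 5) remaining control-plane PDUs */
	while (ex_send("mgmt PDU", &c->mgmt_pdus))
		;
}

int main(void)
{
	struct ex_conn c = { 1, 2, 3, 1 };

	ex_data_xmit(&c);
	return 0;
}
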
+@@ -1134,9 +724,11 @@ static void iscsi_xmitworker(struct work_struct *work)
+ /*
+ * serialize Xmit worker on a per-connection basis.
+ */
++ mutex_lock(&conn->xmitmutex);
+ do {
+ rc = iscsi_data_xmit(conn);
+ } while (rc >= 0 || rc == -EAGAIN);
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ enum {
+@@ -1148,36 +740,25 @@ enum {
+ FAILURE_SESSION_TERMINATE,
+ FAILURE_SESSION_IN_RECOVERY,
+ FAILURE_SESSION_RECOVERY_TIMEOUT,
+- FAILURE_SESSION_LOGGING_OUT,
+- FAILURE_SESSION_NOT_READY,
+ };
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+ sc->SCp.ptr = NULL;
+
+ host = sc->device->host;
+- spin_unlock(host->host_lock);
++ session = iscsi_hostdata(host->hostdata);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
+- if (reason) {
+- sc->result = reason;
+- goto fault;
+- }
+-
+ /*
+ * ISCSI_STATE_FAILED is a temp. state. The recovery
+ * code will decide what is best to do with command queued
+@@ -1191,95 +772,77 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ * be entering our queuecommand while a block is starting
+ * up because the block code is not locked)
+ */
+- switch (session->state) {
+- case ISCSI_STATE_IN_RECOVERY:
++ if (session->state == ISCSI_STATE_IN_RECOVERY) {
+ reason = FAILURE_SESSION_IN_RECOVERY;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_LOGGING_OUT:
+- reason = FAILURE_SESSION_LOGGING_OUT;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_RECOVERY_FAILED:
++ goto reject;
++ }
++
++ if (session->state == ISCSI_STATE_RECOVERY_FAILED)
+ reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- case ISCSI_STATE_TERMINATE:
++ else if (session->state == ISCSI_STATE_TERMINATE)
+ reason = FAILURE_SESSION_TERMINATE;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- default:
++ else
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+- }
+ goto fault;
+ }
+
++ /*
++ * Check for iSCSI window and take care of CmdSN wrap-around
++ */
++ if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
++ reason = FAILURE_WINDOW_CLOSED;
++ goto reject;
++ }
++
+ conn = session->leadconn;
+ if (!conn) {
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+ goto fault;
+ }
+
+- if (iscsi_check_cmdsn_window_closed(conn)) {
+- reason = FAILURE_WINDOW_CLOSED;
+- goto reject;
+- }
+-
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
+-
+- session->queued_cmdsn++;
++ sc->SCp.ptr = (char *)ctask;
++
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->mtask = NULL;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++ ctask->total_length = sc->request_bufflen;
++ iscsi_prep_scsi_cmd_pdu(ctask);
++
++ session->tt->init_cmd_task(ctask);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
++ debug_scsi(
++ "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
++ "win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+- spin_lock(host->host_lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ return 0;
+
+ reject:
+ spin_unlock(&session->lock);
+ debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
+- spin_lock(host->host_lock);
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ fault:
+ spin_unlock(&session->lock);
+- debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
+- spin_lock(host->host_lock);
++ printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
++ sc->cmnd[0], reason);
++ sc->result = (DID_NO_CONNECT << 16);
++ sc->resid = sc->request_bufflen;
++ sc->scsi_done(sc);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_queuecommand);
+@@ -1293,15 +856,106 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
++static int
++iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++ struct iscsi_mgmt_task *mtask;
++
++ spin_lock_bh(&session->lock);
++ if (session->state == ISCSI_STATE_TERMINATE) {
++ spin_unlock_bh(&session->lock);
++ return -EPERM;
++ }
++ if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
++ hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ /*
++ * Login and Text are sent serially, in
++ * request-followed-by-response sequence.
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
++ */
++ mtask = conn->login_mtask;
++ else {
++ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
++ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
++
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*))) {
++ spin_unlock_bh(&session->lock);
++ return -ENOSPC;
++ }
++ }
++
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
++ session->cmdsn++;
++ } else
++ /* do not advance CmdSN */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++
++ if (data_size) {
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
++ } else
++ mtask->data_count = 0;
++
++ INIT_LIST_HEAD(&mtask->running);
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask, data, data_size);
++ spin_unlock_bh(&session->lock);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode, hdr->itt, data_size);
++
++ /*
++ * since send_pdu() could be called at least from two contexts,
++	 * we need to serialize __kfifo_put, so we don't have to take an
++	 * additional lock on the fast data path
++ */
++ if (hdr->opcode & ISCSI_OP_IMMEDIATE)
++ __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
++ else
++ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
++
++ scsi_queue_work(session->host, &conn->xmitwork);
++ return 0;
++}
++
++int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_conn *conn = cls_conn->dd_data;
++ int rc;
++
++ mutex_lock(&conn->xmitmutex);
++ rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
++ mutex_unlock(&conn->xmitmutex);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
++
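iscsi_conn_send_generic() above also pre-formats the CmdSN for every outgoing control PDU: the PDU always carries the current CmdSN, but the counter is only advanced for non-immediate PDUs on a started connection (and never for PDUs sent with RESERVED_ITT). A simplified userspace sketch of that rule, folding the RESERVED_ITT case into the "does not consume a sequence number" path (illustrative names only):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the CmdSN rule used when queuing a control PDU: every PDU
 * carries the current CmdSN, but only non-immediate PDUs sent on a
 * started connection consume a sequence number. */
struct ex_session { uint32_t cmdsn; };

static uint32_t ex_assign_cmdsn(struct ex_session *s, int immediate,
				int conn_started)
{
	uint32_t cmdsn = s->cmdsn;

	if (conn_started && !immediate)
		s->cmdsn++;		/* consumes a slot in the window */
	return cmdsn;
}

int main(void)
{
	struct ex_session s = { .cmdsn = 10 };

	printf("nop-out (immediate):  cmdsn %u\n", ex_assign_cmdsn(&s, 1, 1));
	printf("logout (non-imm):     cmdsn %u\n", ex_assign_cmdsn(&s, 0, 1));
	printf("next PDU:             cmdsn %u\n", ex_assign_cmdsn(&s, 0, 1));
	return 0;
}

Immediate PDUs such as the nop-out ping therefore never shrink the command window, while queued (non-immediate) control PDUs do.
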
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
++ struct iscsi_conn *conn = session->leadconn;
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ session->state = ISCSI_STATE_RECOVERY_FAILED;
+- if (session->leadconn)
+- wake_up(&session->leadconn->ehwait);
++ if (conn)
++ wake_up(&conn->ehwait);
+ }
+ spin_unlock_bh(&session->lock);
+ }
+@@ -1309,32 +963,33 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
++ int fail_session = 0;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
++ if (sc->SCp.phase == session->age) {
++ debug_scsi("failing connection CID %d due to SCSI host reset\n",
++ conn->id);
++ fail_session = 1;
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ /*
+ * we drop the lock here but the leadconn cannot be destoyed while
+ * we are in the scsi eh
+ */
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ if (fail_session)
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+ debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ wait_event_interruptible(conn->ehwait,
+@@ -1344,717 +999,472 @@ failed:
+ if (signal_pending(current))
+ flush_signals(current);
+
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+- iscsi_session_printk(KERN_INFO, session,
+- "host reset succeeded\n");
++ printk(KERN_INFO "iscsi: host reset succeeded\n");
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ return SUCCESS;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+
+-static void iscsi_tmf_timedout(unsigned long data)
++static void iscsi_tmabort_timedout(unsigned long data)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
++ struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&session->lock);
+- if (conn->tmf_state == TMF_QUEUED) {
+- conn->tmf_state = TMF_TIMEDOUT;
+- debug_scsi("tmf timedout\n");
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmabort_state = TMABORT_TIMEDOUT;
++ debug_scsi("tmabort timedout [sc %p itt 0x%x]\n",
++ ctask->sc, ctask->itt);
+ /* unblock eh_abort() */
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock(&session->lock);
+ }
+
+-static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+- struct iscsi_tm *hdr, int age,
+- int timeout)
++/* must be called with the mutex lock */
++static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
++ struct iscsi_cmd_task *ctask)
+ {
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_tm *hdr = &conn->tmhdr;
++ int rc;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+- NULL, 0);
+- if (!task) {
+- spin_unlock_bh(&session->lock);
++ /*
++	 * ctask timed out but the session is OK; requests must be serialized.
++ */
++ memset(hdr, 0, sizeof(struct iscsi_tm));
++ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
++ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
++ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
++
++ rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
++ NULL, 0);
++ if (rc) {
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- spin_lock_bh(&session->lock);
+- debug_scsi("tmf exec failure\n");
+- return -EPERM;
++ debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
++ rc);
++ return rc;
+ }
+- conn->tmfcmd_pdus_cnt++;
+- conn->tmf_timer.expires = timeout * HZ + jiffies;
+- conn->tmf_timer.function = iscsi_tmf_timedout;
+- conn->tmf_timer.data = (unsigned long)conn;
+- add_timer(&conn->tmf_timer);
+- debug_scsi("tmf set timeout\n");
+
++ debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
++
++ spin_lock_bh(&session->lock);
++ ctask->mtask = (struct iscsi_mgmt_task *)
++ session->mgmt_cmds[get_itt(hdr->itt) -
++ ISCSI_MGMT_ITT_OFFSET];
++
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmfcmd_pdus_cnt++;
++ conn->tmabort_timer.expires = 10*HZ + jiffies;
++ conn->tmabort_timer.function = iscsi_tmabort_timedout;
++ conn->tmabort_timer.data = (unsigned long)ctask;
++ add_timer(&conn->tmabort_timer);
++ debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++ mutex_unlock(&conn->xmitmutex);
+
+ /*
+ * block eh thread until:
+ *
+- * 1) tmf response
+- * 2) tmf timeout
++ * 1) abort response
++ * 2) abort timeout
+ * 3) session is terminated or restarted or userspace has
+ * given up on recovery
+ */
+- wait_event_interruptible(conn->ehwait, age != session->age ||
++ wait_event_interruptible(conn->ehwait,
++ sc->SCp.phase != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN ||
+- conn->tmf_state != TMF_QUEUED);
++ conn->tmabort_state != TMABORT_INITIAL);
+ if (signal_pending(current))
+ flush_signals(current);
+- del_timer_sync(&conn->tmf_timer);
++ del_timer_sync(&conn->tmabort_timer);
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
+- if (age != session->age ||
+- session->state != ISCSI_STATE_LOGGED_IN)
+- return -ENOTCONN;
++ mutex_lock(&conn->xmitmutex);
+ return 0;
+ }
+
+ /*
+- * Fail commands. session lock held and recv side suspended and xmit
+- * thread flushed
++ * xmit mutex and session lock must be held
+ */
+-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+- int error)
++static struct iscsi_mgmt_task *
++iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+ {
+- struct iscsi_task *task, *tmp;
++ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
++ struct iscsi_mgmt_task *task;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ debug_scsi("searching %d tasks\n", nr_tasks);
+
+- /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
+- }
+- }
++ for (i = 0; i < nr_tasks; i++) {
++ __kfifo_get(fifo, (void*)&task, sizeof(void*));
++ debug_scsi("check task %u\n", task->itt);
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ if (task->itt == itt) {
++ debug_scsi("matched task\n");
++ return task;
+ }
+- }
+
+- /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
+- }
++ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
++ return NULL;
+ }
+
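iscsi_remove_mgmt_task() above searches a kfifo the only way a fifo can be searched: pop each queued pointer once, keep the match out of the queue, and push everything else back. A userspace sketch of the same rotation, with a small ring of integers standing in for mtask pointers (illustrative only):

#include <stdio.h>

/* Rotate-through-the-fifo search: the match stays out of the queue,
 * everything else is re-queued in order. */
#define QSZ 8

struct ex_fifo {
	int buf[QSZ];
	int head, tail, len;
};

static void ex_put(struct ex_fifo *f, int v)
{
	f->buf[f->tail] = v;		/* sketch: no overflow check */
	f->tail = (f->tail + 1) % QSZ;
	f->len++;
}

static int ex_get(struct ex_fifo *f, int *v)
{
	if (!f->len)
		return 0;
	*v = f->buf[f->head];
	f->head = (f->head + 1) % QSZ;
	f->len--;
	return 1;
}

static int ex_remove(struct ex_fifo *f, int itt)
{
	int i, n = f->len, v;

	for (i = 0; i < n; i++) {
		ex_get(f, &v);
		if (v == itt)
			return 1;	/* removed from the queue */
		ex_put(f, v);		/* keep it queued */
	}
	return 0;
}

int main(void)
{
	struct ex_fifo f = { {0}, 0, 0, 0 };
	int v;

	ex_put(&f, 100); ex_put(&f, 101); ex_put(&f, 102);
	printf("remove 101: %d\n", ex_remove(&f, 101));
	while (ex_get(&f, &v))
		printf("still queued: %d\n", v);
	return 0;
}
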
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
+-{
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+-
+-static void iscsi_start_tx(struct iscsi_conn *conn)
+-{
+- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-}
+-
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+-
+- cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("scsi cmd %p timedout\n", scmd);
+-
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN) {
+- /*
+- * We are probably in the middle of iscsi recovery so let
+- * that complete and handle the error.
+- */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_session *session = conn->session;
+
+- conn = session->leadconn;
+- if (!conn) {
+- /* In the middle of shuting down */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ if (!ctask->mtask)
++ return -EINVAL;
+
+- if (!conn->recv_timeout && !conn->ping_timeout)
+- goto done;
+- /*
+- * if the ping timedout then we are in the middle of cleaning up
+- * and can let the iscsi eh handle it
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+- (conn->ping_timeout * HZ), jiffies))
+- rc = EH_RESET_TIMER;
+- /*
+- * if we are about to check the transport then give the command
+- * more time
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+- jiffies))
+- rc = EH_RESET_TIMER;
+- /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
+- rc = EH_RESET_TIMER;
+-done:
+- spin_unlock(&session->lock);
+- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+- return rc;
++ if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
++ list_del(&ctask->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
++ sizeof(void*));
++ ctask->mtask = NULL;
++ return 0;
+ }
+
+-static void iscsi_check_transport_timeouts(unsigned long data)
++/*
++ * session lock and xmitmutex must be held
++ */
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ int err)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+- struct iscsi_session *session = conn->session;
+- unsigned long recv_timeout, next_timeout = 0, last_recv;
++ struct scsi_cmnd *sc;
+
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN)
+- goto done;
+-
+- recv_timeout = conn->recv_timeout;
+- if (!recv_timeout)
+- goto done;
+-
+- recv_timeout *= HZ;
+- last_recv = conn->last_recv;
+- if (conn->ping_task &&
+- time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+- jiffies)) {
+- iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+- "expired, last rx %lu, last ping %lu, "
+- "now %lu\n", conn->ping_timeout, last_recv,
+- conn->last_ping, jiffies);
+- spin_unlock(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ sc = ctask->sc;
++ if (!sc)
+ return;
+- }
+
+- if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+- /* send a ping to try to provoke some traffic */
+- debug_scsi("Sending nopout as ping on conn %p\n", conn);
+- iscsi_send_nopout(conn, NULL);
+- next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+- } else
+- next_timeout = last_recv + recv_timeout;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
++ iscsi_ctask_mtask_cleanup(ctask);
+
+- debug_scsi("Setting next tmo %lu\n", next_timeout);
+- mod_timer(&conn->transport_timer, next_timeout);
+-done:
+- spin_unlock(&session->lock);
+-}
+-
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+- struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ sc->result = err;
++ sc->resid = sc->request_bufflen;
++ /* release ref from queuecommand */
++ __iscsi_put_ctask(ctask);
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
+- struct iscsi_tm *hdr;
+- int rc, age;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ struct iscsi_session *session;
++ int rc;
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
+
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ conn = ctask->conn;
++ session = conn->session;
++
++ conn->eh_abort_cnt++;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
++
++ mutex_lock(&conn->xmitmutex);
++ spin_lock_bh(&session->lock);
++
+ /*
+ * If we are not logged in or we have started a new session
+ * then let the host reset code handle this
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+- sc->SCp.phase != session->age) {
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+- }
+-
+- conn = session->leadconn;
+- conn->eh_abort_cnt++;
+- age = session->age;
+-
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ if (session->state != ISCSI_STATE_LOGGED_IN ||
++ sc->SCp.phase != session->age)
++ goto failed;
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
+- goto success;
++	/* ctask partially sent: there is no clean way to abort it, so fail */
++ if (conn->ctask == ctask) {
++ printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
++ "Failing abort\n", sc, ctask->itt);
++ goto failed;
+ }
+
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto failed;
+- conn->tmf_state = TMF_QUEUED;
++ if (ctask->state == ISCSI_TASK_PENDING)
++ goto success_cleanup;
+
+- hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ conn->tmabort_state = TMABORT_INITIAL;
+
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+- rc = FAILED;
+- goto failed;
+- }
++ spin_unlock_bh(&session->lock);
++ rc = iscsi_exec_abort_task(sc, ctask);
++ spin_lock_bh(&session->lock);
+
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+- iscsi_suspend_tx(conn);
+- /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
+- */
+- spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
+- conn->tmf_state = TMF_INITIAL;
+- spin_unlock(&session->lock);
+- iscsi_start_tx(conn);
+- goto success_unlocked;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto failed_unlocked;
+- case TMF_NOT_FOUND:
+- if (!sc->SCp.ptr) {
+- conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ if (rc || sc->SCp.phase != session->age ||
++ session->state != ISCSI_STATE_LOGGED_IN)
++ goto failed;
++ iscsi_ctask_mtask_cleanup(ctask);
++
++ switch (conn->tmabort_state) {
++ case TMABORT_SUCCESS:
++ goto success_cleanup;
++ case TMABORT_NOT_FOUND:
++ if (!ctask->sc) {
++ /* ctask completed before tmf abort response */
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+- conn->tmf_state = TMF_INITIAL;
++ /* timedout or failed */
++ spin_unlock_bh(&session->lock);
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ spin_lock_bh(&session->lock);
+ goto failed;
+ }
+
+-success:
++success_cleanup:
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ spin_unlock_bh(&session->lock);
+-success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
+- mutex_unlock(&session->eh_mutex);
+- return SUCCESS;
+
+-failed:
+- spin_unlock_bh(&session->lock);
+-failed_unlocked:
+- debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+-
+-static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->rtt = RESERVED_ITT;
+-}
+-
+-int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+-{
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- struct iscsi_tm *hdr;
+- int rc = FAILED;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+-
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+- * Just check if we are not logged in. We cannot check for
+- * the phase because the reset could come from a ioctl.
++ * clean up task if aborted. we have the xmitmutex so grab
++ * the recv lock as a writer
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+- goto unlock;
+- conn = session->leadconn;
+-
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto unlock;
+- conn->tmf_state = TMF_QUEUED;
+-
+- hdr = &conn->tmhdr;
+- iscsi_prep_lun_reset_pdu(sc, hdr);
+-
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+- session->lu_reset_timeout)) {
+- rc = FAILED;
+- goto unlock;
+- }
+-
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- break;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto done;
+- default:
+- conn->tmf_state = TMF_INITIAL;
+- goto unlock;
+- }
+-
+- rc = SUCCESS;
+- spin_unlock_bh(&session->lock);
+-
+- iscsi_suspend_tx(conn);
+-
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_all_commands(conn, sc->device->lun, DID_ERROR);
+- conn->tmf_state = TMF_INITIAL;
++ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+- iscsi_start_tx(conn);
+- goto done;
++success_rel_mutex:
++ mutex_unlock(&conn->xmitmutex);
++ return SUCCESS;
+
+-unlock:
++failed:
+ spin_unlock_bh(&session->lock);
+-done:
+- debug_scsi("iscsi_eh_device_reset %s\n",
+- rc == SUCCESS ? "SUCCESS" : "FAILED");
+- mutex_unlock(&session->eh_mutex);
+- return rc;
++ mutex_unlock(&conn->xmitmutex);
++
++ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ return FAILED;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
++EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+
+-/*
+- * Pre-allocate a pool of @max items of @item_size. By default, the pool
+- * should be accessed via kfifo_{get,put} on q->queue.
+- * Optionally, the caller can obtain the array of object pointers
+- * by passing in a non-NULL @items pointer
+- */
+ int
+-iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
++iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
+ {
+- int i, num_arrays = 1;
++ int i;
+
+- memset(q, 0, sizeof(*q));
++ *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (*items == NULL)
++ return -ENOMEM;
+
+ q->max = max;
+-
+- /* If the user passed an items pointer, he wants a copy of
+- * the array. */
+- if (items)
+- num_arrays++;
+- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+- if (q->pool == NULL)
+- goto enomem;
++ q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (q->pool == NULL) {
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+ GFP_KERNEL, NULL);
+- if (q->queue == ERR_PTR(-ENOMEM))
+- goto enomem;
++ if (q->queue == ERR_PTR(-ENOMEM)) {
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ for (i = 0; i < max; i++) {
+- q->pool[i] = kzalloc(item_size, GFP_KERNEL);
++ q->pool[i] = kmalloc(item_size, GFP_KERNEL);
+ if (q->pool[i] == NULL) {
+- q->max = i;
+- goto enomem;
++ int j;
++
++ for (j = 0; j < i; j++)
++ kfree(q->pool[j]);
++
++ kfifo_free(q->queue);
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
+ }
++ memset(q->pool[i], 0, item_size);
++ (*items)[i] = q->pool[i];
+ __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ }
+-
+- if (items) {
+- *items = q->pool + max;
+- memcpy(*items, q->pool, max * sizeof(void *));
+- }
+-
+ return 0;
+-
+-enomem:
+- iscsi_pool_free(q);
+- return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_init);
+
+-void iscsi_pool_free(struct iscsi_pool *q)
++void iscsi_pool_free(struct iscsi_queue *q, void **items)
+ {
+ int i;
+
+ for (i = 0; i < q->max; i++)
+- kfree(q->pool[i]);
+- if (q->pool)
+- kfree(q->pool);
++ kfree(items[i]);
++ kfree(q->pool);
++ kfree(items);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
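This backported pool is the "preallocate everything up front, hand objects out through a queue of pointers" pattern: iscsi_pool_init() allocates max items, pushes a pointer to each into q->queue, and fills the items[] array so callers such as iscsi_session_setup() can walk every object (for example to pre-format ITTs). A userspace sketch of the same pattern with a plain array standing in for the kfifo (illustrative names, not the kernel API):

#include <stdlib.h>
#include <stdio.h>

/* Preallocate everything, hand objects out through a free queue. */
struct ex_pool {
	void **free;		/* free object pointers */
	void **items;		/* all objects, for iteration/teardown */
	int   nfree, max;
};

static void ex_pool_free(struct ex_pool *q)
{
	int i;

	if (q->items)
		for (i = 0; i < q->max; i++)
			free(q->items[i]);
	free(q->items);
	free(q->free);
}

static int ex_pool_init(struct ex_pool *q, int max, size_t item_size)
{
	int i;

	q->max = max;
	q->nfree = 0;
	q->items = calloc(max, sizeof(void *));
	q->free  = calloc(max, sizeof(void *));
	if (!q->items || !q->free)
		goto enomem;
	for (i = 0; i < max; i++) {
		q->items[i] = calloc(1, item_size);
		if (!q->items[i])
			goto enomem;
		q->free[q->nfree++] = q->items[i];	/* like __kfifo_put */
	}
	return 0;
enomem:
	ex_pool_free(q);
	return -1;
}

static void *ex_pool_get(struct ex_pool *q)
{
	return q->nfree ? q->free[--q->nfree] : NULL;	/* like __kfifo_get */
}

static void ex_pool_put(struct ex_pool *q, void *obj)
{
	q->free[q->nfree++] = obj;
}

int main(void)
{
	struct ex_pool pool;
	void *task;

	if (ex_pool_init(&pool, 4, 64))
		return 1;
	task = ex_pool_get(&pool);	/* queuecommand grabs a task */
	printf("got %p, %d free\n", task, pool.nfree);
	ex_pool_put(&pool, task);	/* completion returns it */
	ex_pool_free(&pool);
	return 0;
}

Because every object exists for the lifetime of the session, the hot path never allocates memory: __kfifo_get() either returns a free task or fails, which is why iscsi_queuecommand() can simply return SCSI_MLQUEUE_HOST_BUSY on FAILURE_OOM.
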
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
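The diagram and the two macros above describe the per-host private area: the first unsigned long stores a pointer to the class session, and the driver's iscsi_session lives immediately after it (that second part is what iscsi_hostdata(), defined in the transport headers, is assumed to return). A small userspace model of that layout:

#include <stdio.h>

/* Model of the hostdata layout: a leading word that stores the
 * class-session pointer, followed by the iscsi_session itself. */
struct ex_session { int state; unsigned int cmdsn; };

union ex_hostdata {
	unsigned long align;	/* keeps the leading word aligned */
	unsigned char raw[sizeof(unsigned long) + sizeof(struct ex_session)];
};

int main(void)
{
	union ex_hostdata hd;
	void *cls_session = &hd;	/* any pointer will do for the demo */
	struct ex_session *session;

	/* hostdata_session(): the leading unsigned long */
	*(unsigned long *)hd.raw = (unsigned long)cls_session;

	/* iscsi_hostdata() (assumed): the area just past that word */
	session = (struct ex_session *)(hd.raw + sizeof(unsigned long));
	session->state = 1;

	printf("class session %p, iscsi_session at offset %zu\n",
	       (void *)*(unsigned long *)hd.raw, sizeof(unsigned long));
	return 0;
}
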
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
+- }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
+- }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ *hostno = shost->host_no;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
+- return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+- session->fast_abort = 1;
+- session->lu_reset_timeout = 15;
+- session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
+- session->queued_cmdsn = session->cmdsn = initial_cmdsn;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = ISCSI_XMIT_CMDS_MAX;
++ session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+- mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
++
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
+
+- if (iscsi_add_session(cls_session, id))
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
+- iscsi_pool_free(&session->cmdpool);
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++mgmtpool_alloc_fail:
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
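The two pre-format loops above give the pools disjoint ITT ranges: SCSI command tasks get ITTs 0..cmds_max-1 and management tasks get ITTs starting at ISCSI_MGMT_ITT_OFFSET, which is how code such as iscsi_verify_itt() can classify a received ITT with a simple compare. A sketch of that split, with a stand-in value for the offset (the real constant lives in the iSCSI headers):

#include <stdio.h>

/* EX_MGMT_ITT_OFFSET is a stand-in for ISCSI_MGMT_ITT_OFFSET; the
 * real value is defined elsewhere. */
#define EX_CMDS_MAX		128
#define EX_MGMT_CMDS_MAX	16
#define EX_MGMT_ITT_OFFSET	0x1000

static const char *ex_classify(unsigned int itt)
{
	if (itt < EX_CMDS_MAX)
		return "scsi cmd task";
	if (itt >= EX_MGMT_ITT_OFFSET &&
	    itt < EX_MGMT_ITT_OFFSET + EX_MGMT_CMDS_MAX)
		return "mgmt task";
	return "bogus";
}

int main(void)
{
	printf("itt 5      -> %s\n", ex_classify(5));
	printf("itt 0x1002 -> %s\n", ex_classify(0x1002));
	printf("itt 900    -> %s\n", ex_classify(900));
	return 0;
}
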
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
+- iscsi_pool_free(&session->cmdpool);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+
+- kfree(session->password);
+- kfree(session->password_in);
+- kfree(session->username);
+- kfree(session->username_in);
+ kfree(session->targetname);
+- kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+ iscsi_destroy_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,68 +1472,74 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ conn->id = conn_idx;
+ conn->exp_statsn = 0;
+- conn->tmf_state = TMF_INITIAL;
+-
+- init_timer(&conn->transport_timer);
+- conn->transport_timer.data = (unsigned long)conn;
+- conn->transport_timer.function = iscsi_check_transport_timeouts;
+-
++ conn->tmabort_state = TMABORT_INITIAL;
+ INIT_LIST_HEAD(&conn->run_list);
+ INIT_LIST_HEAD(&conn->mgmt_run_list);
+- INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->xmitqueue);
+- INIT_LIST_HEAD(&conn->requeue);
++
++	/* initialize the immediate and non-immediate PDU command queues */
++ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->immqueue == ERR_PTR(-ENOMEM))
++ goto immqueue_alloc_fail;
++
++ conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
++ goto mgmtqueue_alloc_fail;
++
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+- init_timer(&conn->tmf_timer);
++ init_timer(&conn->tmabort_timer);
++ mutex_init(&conn->xmitmutex);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
++ kfifo_free(conn->mgmtqueue);
++mgmtqueue_alloc_fail:
++ kfifo_free(conn->immqueue);
++immqueue_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
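
iscsi_conn_setup() above unwinds its failure paths with the usual goto ladder, releasing resources in the reverse order they were acquired (login_mtask data, login_mtask, mgmtqueue, immqueue). A stripped-down userspace sketch of the idiom, reduced to three allocations:

#include <stdlib.h>
#include <stdio.h>

/* Reverse-order goto unwind, as in iscsi_conn_setup(). */
static int ex_setup(void)
{
	void *immq, *mgmtq, *data;

	immq = malloc(32);
	if (!immq)
		goto immq_fail;
	mgmtq = malloc(32);
	if (!mgmtq)
		goto mgmtq_fail;
	data = malloc(32);
	if (!data)
		goto data_fail;

	printf("setup ok\n");
	/* freed immediately just to keep the demo leak-free */
	free(data);
	free(mgmtq);
	free(immq);
	return 0;

data_fail:			/* undo in reverse order of setup */
	free(mgmtq);
mgmtq_fail:
	free(immq);
immq_fail:
	return -1;
}

int main(void)
{
	return ex_setup();
}
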
+@@ -2142,7 +1558,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+ unsigned long flags;
+
+- del_timer_sync(&conn->transport_timer);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ mutex_lock(&conn->xmitmutex);
+
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+@@ -2155,6 +1572,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
++ mutex_unlock(&conn->xmitmutex);
++
+ /*
+ * Block until all in-progress commands for this connection
+ * time out or fail.
+@@ -2167,10 +1586,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ msleep_interruptible(500);
+- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+- "host_busy %d host_failed %d\n",
+- session->host->host_busy,
+- session->host->host_failed);
++ printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
++ "host_failed %d\n", session->host->host_busy,
++ session->host->host_failed);
+ /*
+ * force eh_abort() to unblock
+ */
+@@ -2178,17 +1596,23 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- iscsi_suspend_tx(conn);
++ scsi_flush_work(session->host);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+- if (session->leadconn == conn)
++ if (session->leadconn == conn) {
+ session->leadconn = NULL;
++		/* no connections exist; reset sequencing */
++ session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
++ }
+ spin_unlock_bh(&session->lock);
+
++ kfifo_free(conn->immqueue);
++ kfifo_free(conn->mgmtqueue);
++
+ iscsi_destroy_conn(cls_conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+@@ -2199,41 +1623,21 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+
+ if (!session) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "can't start unbound connection\n");
++ printk(KERN_ERR "iscsi: can't start unbound connection\n");
+ return -EPERM;
+ }
+
+ if ((session->imm_data_en || !session->initial_r2t_en) &&
+ session->first_burst > session->max_burst) {
+- iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+- "first_burst %d max_burst %d\n",
+- session->first_burst, session->max_burst);
++ printk("iscsi: invalid burst lengths: "
++ "first_burst %d max_burst %d\n",
++ session->first_burst, session->max_burst);
+ return -EINVAL;
+ }
+
+- if (conn->ping_timeout && !conn->recv_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+- "zero. Using 5 seconds\n.");
+- conn->recv_timeout = 5;
+- }
+-
+- if (conn->recv_timeout && !conn->ping_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+- "zero. Using 5 seconds.\n");
+- conn->ping_timeout = 5;
+- }
+-
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_STARTED;
+ session->state = ISCSI_STATE_LOGGED_IN;
+- session->queued_cmdsn = session->cmdsn;
+-
+- conn->last_recv = jiffies;
+- conn->last_ping = jiffies;
+- if (conn->recv_timeout && conn->ping_timeout)
+- mod_timer(&conn->transport_timer,
+- jiffies + (conn->recv_timeout * HZ));
+
+ switch(conn->stop_stage) {
+ case STOP_CONN_RECOVER:
+@@ -2242,11 +1646,13 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ * commands after successful recovery
+ */
+ conn->stop_stage = 0;
+- conn->tmf_state = TMF_INITIAL;
++ conn->tmabort_state = TMABORT_INITIAL;
+ session->age++;
+- if (session->age == 16)
+- session->age = 0;
+- break;
++ spin_unlock_bh(&session->lock);
++
++ iscsi_unblock_session(session_to_cls(session));
++ wake_up(&conn->ehwait);
++ return 0;
+ case STOP_CONN_TERM:
+ conn->stop_stage = 0;
+ break;
+@@ -2255,8 +1661,6 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
+- wake_up(&conn->ehwait);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+@@ -2264,23 +1668,52 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
++ __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
++ if (mtask == conn->login_mtask)
++ continue;
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ list_del(&mtask->running);
++
++ if (mtask == conn->login_mtask)
++ continue;
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
++ }
++
++ conn->mtask = NULL;
++}
++
++/* Fail commands. Mutex and session lock held and recv side suspended */
++static void fail_all_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_cmd_task *ctask, *tmp;
++
++ /* flush pending */
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
++ ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
++ }
++
++ /* fail all other running */
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ debug_scsi("failing in progress sc %p itt 0x%x\n",
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+
+- conn->task = NULL;
++ conn->ctask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2288,13 +1721,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ {
+ int old_stop_stage;
+
+- del_timer_sync(&conn->transport_timer);
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (conn->stop_stage == STOP_CONN_TERM) {
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return;
+ }
+
+@@ -2311,9 +1740,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
+ conn->c_stage = ISCSI_CONN_STOPPED;
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ spin_unlock_bh(&session->lock);
+
+- iscsi_suspend_tx(conn);
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
++ mutex_lock(&conn->xmitmutex);
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +1760,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2334,11 +1768,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ * flush queues.
+ */
+ spin_lock_bh(&session->lock);
+- fail_all_commands(conn, -1,
+- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
++ fail_all_commands(conn);
+ flush_control_queues(session, conn);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+@@ -2352,8 +1786,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ iscsi_start_session_recovery(session, conn, flag);
+ break;
+ default:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid stop flag %d\n", flag);
++ printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+@@ -2361,7 +1794,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2387,21 +1820,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ uint32_t value;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- sscanf(buf, "%d", &session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- sscanf(buf, "%d", &session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- sscanf(buf, "%d", &session->lu_reset_timeout);
+- break;
+- case ISCSI_PARAM_PING_TMO:
+- sscanf(buf, "%d", &conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- sscanf(buf, "%d", &conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ sscanf(buf, "%d", &conn->max_recv_dlength);
+ break;
+@@ -2449,30 +1867,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ case ISCSI_PARAM_EXP_STATSN:
+ sscanf(buf, "%u", &conn->exp_statsn);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- kfree(session->username);
+- session->username = kstrdup(buf, GFP_KERNEL);
+- if (!session->username)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- kfree(session->username_in);
+- session->username_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->username_in)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- kfree(session->password);
+- session->password = kstrdup(buf, GFP_KERNEL);
+- if (!session->password)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- kfree(session->password_in);
+- session->password_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->password_in)
+- return -ENOMEM;
+- break;
+ case ISCSI_PARAM_TARGET_NAME:
+ /* this should not change between logins */
+ if (session->targetname)
+@@ -2500,14 +1894,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,19 +1905,11 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- len = sprintf(buf, "%d\n", session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- len = sprintf(buf, "%d\n", session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+- break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ break;
+@@ -2562,27 +1940,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_TPGT:
+ len = sprintf(buf, "%d\n", session->tpgt);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- len = sprintf(buf, "%s\n", session->username);
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- len = sprintf(buf, "%s\n", session->username_in);
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- len = sprintf(buf, "%s\n", session->password);
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- len = sprintf(buf, "%s\n", session->password_in);
+- break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2598,12 +1955,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_PING_TMO:
+- len = sprintf(buf, "%u\n", conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- len = sprintf(buf, "%u\n", conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ break;
+@@ -2639,72 +1990,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+
+-int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+- int len;
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->netdev);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+-
+-int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf, int buflen)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+-
+ MODULE_AUTHOR("Mike Christie");
+ MODULE_DESCRIPTION("iSCSI library functions");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..caf1836 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,28 +30,26 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
+-#define ISCSI_CONN_ATTRS 13
+-#define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_SESSION_ATTRS 11
++#define ISCSI_CONN_ATTRS 11
++#define ISCSI_HOST_ATTRS 0
++#define ISCSI_TRANSPORT_VERSION "2.0-724"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+-static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ /*
+ * list of registered transports and lock that must
+@@ -64,12 +62,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +77,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,163 +115,22 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+-
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+-
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
+-
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
+- ihost->scan_workq = create_singlethread_workqueue(
+- ihost->scan_workq_name);
+- if (!ihost->scan_workq)
+- return -ENOMEM;
+- return 0;
+-}
+-
+-static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
+-{
+- struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- destroy_workqueue(ihost->scan_workq);
+ return 0;
+ }
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+ "iscsi_host",
+ iscsi_setup_host,
+- iscsi_remove_host,
++ NULL,
+ NULL);
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+@@ -340,54 +201,6 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
+ * The following functions can be used by LLDs that allocate
+ * their own scsi_hosts or by software iscsi LLDs
+ */
+-static struct {
+- int value;
+- char *name;
+-} iscsi_session_state_names[] = {
+- { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+- { ISCSI_SESSION_FAILED, "FAILED" },
+- { ISCSI_SESSION_FREE, "FREE" },
+-};
+-
+-static const char *iscsi_session_state_name(int state)
+-{
+- int i;
+- char *name = NULL;
+-
+- for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+- if (iscsi_session_state_names[i].value == state) {
+- name = iscsi_session_state_names[i].name;
+- break;
+- }
+- }
+- return name;
+-}
+-
+-int iscsi_session_chkready(struct iscsi_cls_session *session)
+-{
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_LOGGED_IN:
+- err = 0;
+- break;
+- case ISCSI_SESSION_FAILED:
+- err = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_SESSION_FREE:
+- err = DID_NO_CONNECT << 16;
+- break;
+- default:
+- err = DID_NO_CONNECT << 16;
+- break;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+-
+ static void iscsi_session_release(struct device *dev)
+ {
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
+@@ -403,114 +216,22 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+-/**
+- * iscsi_scan_finished - helper to report when running scans are done
+- * @shost: scsi host
+- * @time: scan run time
+- *
+- * This function can be used by drives like qla4xxx to report to the scsi
+- * layer when the scans it kicked off at module load time are done.
+- */
+-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+-{
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- /*
+- * qla4xxx will have kicked off some session unblocks before calling
+- * scsi_scan_host, so just wait for them to complete.
+- */
+- return !atomic_read(&ihost->nr_scans);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+-
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+-
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
+-}
+-
+-static void iscsi_scan_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session, scan_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
+-
+- iscsi_user_scan_session(&session->dev, &scan_data);
+- atomic_dec(&ihost->nr_scans);
++ return 0;
+ }
+
+ static void session_recovery_timedout(struct work_struct *work)
+@@ -518,24 +239,9 @@ static void session_recovery_timedout(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
+- unsigned long flags;
+-
+- iscsi_cls_session_printk(KERN_INFO, session,
+- "session recovery timed out after %d secs\n",
+- session->recovery_tmo);
+
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_FAILED:
+- session->state = ISCSI_SESSION_FREE;
+- break;
+- case ISCSI_SESSION_LOGGED_IN:
+- case ISCSI_SESSION_FREE:
+- /* we raced with the unblock's flush */
+- spin_unlock_irqrestore(&session->lock, flags);
+- return;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
++ dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
++ "out after %d secs\n", session->recovery_tmo);
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
+@@ -543,201 +249,73 @@ static void session_recovery_timedout(struct work_struct *work)
+ scsi_target_unblock(&session->dev);
+ }
+
+-static void __iscsi_unblock_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unblock_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /*
+- * The recovery and unblock work get run from the same workqueue,
+- * so try to cancel it if it was going to run after this unblock.
+- */
+- cancel_delayed_work(&session->recovery_work);
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_LOGGED_IN;
+- spin_unlock_irqrestore(&session->lock, flags);
+- /* start IO */
+- scsi_target_unblock(&session->dev);
+- /*
+- * Only do kernel scanning if the driver is properly hooked into
+- * the async scanning code (drivers like iscsi_tcp do login and
+- * scanning from userspace).
+- */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
+-}
+-
+-/**
+- * iscsi_unblock_session - set a session as logged in and start IO.
+- * @session: iscsi session
+- *
+- * Mark a session as ready to accept IO.
+- */
+ void iscsi_unblock_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+- /*
+- * make sure all the events have completed before tell the driver
+- * it is safe
+- */
+- flush_workqueue(iscsi_eh_timer_workq);
++ if (!cancel_delayed_work(&session->recovery_work))
++ flush_scheduled_work();
++ scsi_target_unblock(&session->dev);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
+
+-static void __iscsi_block_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- block_work);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FAILED;
+- spin_unlock_irqrestore(&session->lock, flags);
+- scsi_target_block(&session->dev);
+- queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+- session->recovery_tmo * HZ);
+-}
+-
+ void iscsi_block_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->block_work);
++ scsi_target_block(&session->dev);
++ schedule_delayed_work(&session->recovery_work,
++ session->recovery_tmo * HZ);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_block_session);
+
+-static void __iscsi_unbind_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unbind_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /* Prevent new scans and make sure scanning is not in progress */
+- mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return;
+- }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+-
+- scsi_remove_target(&session->dev);
+- iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+-}
+-
+-static int iscsi_unbind_session(struct iscsi_cls_session *session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- return queue_work(ihost->scan_workq, &session->unbind_work);
+-}
+-
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ session->transport = transport;
+ session->recovery_tmo = 120;
+- session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+- INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+- INIT_WORK(&session->block_work, __iscsi_block_session);
+- INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+- INIT_WORK(&session->scan_work, iscsi_scan_session);
+- spin_lock_init(&session->lock);
+
+ /* this is released in the dev's release function */
+ scsi_host_get(shost);
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id = target_id;
++ struct iscsi_host *ihost;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+ err = device_add(&session->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "could not register session's dev\n");
++ dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
++ "register session's dev\n");
+ goto release_host;
+ }
+ transport_register_device(&session->dev);
+
+- spin_lock_irqsave(&sesslock, flags);
+- list_add(&session->sess_list, &sesslist);
+- spin_unlock_irqrestore(&sesslock, flags);
+-
+- iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
+ return 0;
+
+ release_host:
+@@ -750,18 +328,17 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+- * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -773,65 +350,19 @@ iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_create_session);
+
+-static void iscsi_conn_release(struct device *dev)
+-{
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+- struct device *parent = conn->dev.parent;
+-
+- kfree(conn);
+- put_device(parent);
+-}
+-
+-static int iscsi_is_conn_dev(const struct device *dev)
+-{
+- return dev->release == iscsi_conn_release;
+-}
+-
+-static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+-{
+- if (!iscsi_is_conn_dev(dev))
+- return 0;
+- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+-}
+-
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&sesslock, flags);
+- list_del(&session->sess_list);
+- spin_unlock_irqrestore(&sesslock, flags);
++ struct iscsi_host *ihost = shost->shost_data;
+
+- /* make sure there are no blocks/unblocks queued */
+- flush_workqueue(iscsi_eh_timer_workq);
+- /* make sure the timedout callout is not running */
+ if (!cancel_delayed_work(&session->recovery_work))
+- flush_workqueue(iscsi_eh_timer_workq);
+- /*
+- * If we are blocked let commands flow again. The lld or iscsi
+- * layer should set up the queuecommand to fail commands.
+- * We assume that LLD will not be calling block/unblock while
+- * removing the session.
+- */
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FREE;
+- spin_unlock_irqrestore(&session->lock, flags);
++ flush_scheduled_work();
+
+- scsi_target_unblock(&session->dev);
+- /* flush running scans then delete devices */
+- flush_workqueue(ihost->scan_workq);
+- __iscsi_unbind_session(&session->unbind_work);
++ mutex_lock(&ihost->mutex);
++ list_del(&session->host_list);
++ mutex_unlock(&ihost->mutex);
+
+- /* hw iscsi may not have removed all connections from session */
+- err = device_for_each_child(&session->dev, NULL,
+- iscsi_iter_destroy_conn_fn);
+- if (err)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Could not delete all connections "
+- "for session. Error %d.\n", err);
++ scsi_remove_target(&session->dev);
+
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+@@ -840,9 +371,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
+
+ void iscsi_free_session(struct iscsi_cls_session *session)
+ {
+- iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
+ put_device(&session->dev);
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_free_session);
+
+ /**
+@@ -851,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
+ *
+ * Can be called by a LLD or iscsi_transport. There must not be
+ * any running connections.
+- */
++ **/
+ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ {
+ iscsi_remove_session(session);
+@@ -860,10 +391,23 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+
++static void iscsi_conn_release(struct device *dev)
++{
++ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
++ struct device *parent = conn->dev.parent;
++
++ kfree(conn);
++ put_device(parent);
++}
++
++static int iscsi_is_conn_dev(const struct device *dev)
++{
++ return dev->release == iscsi_conn_release;
++}
++
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -874,19 +418,19 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * for software iscsi we could be trying to preallocate a connection struct
+ * in which case there could be two connection structs and cid would be
+ * non-zero.
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+- unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -903,16 +447,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ conn->dev.release = iscsi_conn_release;
+ err = device_register(&conn->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session, "could not "
+- "register connection's dev\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
++ "connection's dev\n");
+ goto release_parent_ref;
+ }
+ transport_register_device(&conn->dev);
+-
+- spin_lock_irqsave(&connlock, flags);
+- list_add(&conn->conn_list, &connlist);
+- conn->active = 1;
+- spin_unlock_irqrestore(&connlock, flags);
+ return conn;
+
+ release_parent_ref:
+@@ -926,23 +465,17 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
+
+ /**
+ * iscsi_destroy_conn - destroy iscsi class connection
+- * @conn: iscsi cls session
++ * @session: iscsi cls session
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&connlock, flags);
+- conn->active = 0;
+- list_del(&conn->conn_list);
+- spin_unlock_irqrestore(&connlock, flags);
+-
+ transport_unregister_device(&conn->dev);
+ device_unregister(&conn->dev);
+ return 0;
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+
+ /*
+@@ -1011,8 +544,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+- "control PDU: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
++ "control PDU: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1045,8 +578,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+- "conn error (%d)\n", error);
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
++ "conn error (%d)\n", error);
+ return;
+ }
+
+@@ -1060,8 +593,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ iscsi_broadcast_skb(skb, GFP_ATOMIC);
+
+- iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+- error);
++ dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
++ error);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_error);
+
+@@ -1076,10 +609,12 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
+ int t = done ? NLMSG_DONE : type;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+- if (!skb) {
+- printk(KERN_ERR "Could not allocate skb to send reply.\n");
+- return -ENOMEM;
+- }
++ /*
++ * FIXME:
++ * user is supposed to react on iferror == -ENOMEM;
++ * see iscsi_if_rx().
++ */
++ BUG_ON(!skb);
+
+ nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+ nlh->nlmsg_flags = flags;
+@@ -1116,8 +651,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+
+ skbstat = alloc_skb(len, GFP_ATOMIC);
+ if (!skbstat) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+- "deliver stats: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
++ "deliver stats: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1152,87 +687,145 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+ }
+
+ /**
+- * iscsi_session_event - send session destr. completion event
+- * @session: iscsi class session
+- * @event: type of event
+- */
+-int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event)
++ * iscsi_if_destroy_session_done - send session destr. completion event
++ * @conn: last connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * removed a session.
++ **/
++int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ {
+ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_uevent *ev;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
++ unsigned long flags;
+ int rc, len = NLMSG_SPACE(sizeof(*ev));
+
+- priv = iscsi_if_transport_lookup(session->transport);
++ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
+ shost = iscsi_session_to_shost(session);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u\n", event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session destruction event\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+- ev->transport_handle = iscsi_handle(session->transport);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_KEVENT_DESTROY_SESSION;
++ ev->r.d_session.host_no = shost->host_no;
++ ev->r.d_session.sid = session->sid;
+
+- ev->type = event;
+- switch (event) {
+- case ISCSI_KEVENT_DESTROY_SESSION:
+- ev->r.d_session.host_no = shost->host_no;
+- ev->r.d_session.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_CREATE_SESSION:
+- ev->r.c_session_ret.host_no = shost->host_no;
+- ev->r.c_session_ret.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_UNBIND_SESSION:
+- ev->r.unbind_session.host_no = shost->host_no;
+- ev->r.unbind_session.sid = session->sid;
+- break;
+- default:
+- iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+- "%u.\n", event);
+- kfree_skb(skb);
++ /*
++ * this will occur if the daemon is not up, so we just warn
++ * the user and when the daemon is restarted it will handle it
++ */
++ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
++ if (rc < 0)
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session destruction event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
++
++/**
++ * iscsi_if_create_session_done - send session creation completion event
++ * @conn: leading connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * created a session or an existing session is back in the logged in state.
++ **/
++int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
++{
++ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
++ struct Scsi_Host *shost;
++ struct iscsi_uevent *ev;
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++ unsigned long flags;
++ int rc, len = NLMSG_SPACE(sizeof(*ev));
++
++ priv = iscsi_if_transport_lookup(conn->transport);
++ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
++ shost = iscsi_session_to_shost(session);
++
++ skb = alloc_skb(len, GFP_KERNEL);
++ if (!skb) {
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
++ return -ENOMEM;
+ }
+
++ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
++ ev = NLMSG_DATA(nlh);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_UEVENT_CREATE_SESSION;
++ ev->r.c_session_ret.host_no = shost->host_no;
++ ev->r.c_session_ret.sid = session->sid;
++
+ /*
+ * this will occur if the daemon is not up, so we just warn
+ * the user and when the daemon is restarted it will handle it
+ */
+ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+ if (rc < 0)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u. Check iscsi daemon\n",
+- event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_session_event);
++EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ unsigned long flags;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1242,34 +835,47 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
++ unsigned long flags;
+
+ session = iscsi_session_lookup(ev->u.c_conn.sid);
+ if (!session) {
+- printk(KERN_ERR "iscsi: invalid session %d.\n",
++ printk(KERN_ERR "iscsi: invalid session %d\n",
+ ev->u.c_conn.sid);
+ return -EINVAL;
+ }
+
+ conn = transport->create_conn(session, ev->u.c_conn.cid);
+ if (!conn) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "couldn't create a new connection.");
++ printk(KERN_ERR "iscsi: couldn't create a new "
++ "connection for session %d\n",
++ session->sid);
+ return -ENOMEM;
+ }
+
+ ev->r.c_conn_ret.sid = session->sid;
+ ev->r.c_conn_ret.cid = conn->cid;
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
++
+ return 0;
+ }
+
+ static int
+ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
++ unsigned long flags;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ if (!conn)
+ return -EINVAL;
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
+
+ if (transport->destroy_conn)
+ transport->destroy_conn(conn);
+@@ -1307,7 +913,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +922,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1353,50 +947,15 @@ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+ {
+- struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+- int err;
+
+ if (!transport->tgt_dscvr)
+ return -EINVAL;
+
+- shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "target discovery could not find host no %u\n",
+- ev->u.tgt_dscvr.host_no);
+- return -ENODEV;
+- }
+-
+-
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+- ev->u.tgt_dscvr.enable, dst_addr);
+- scsi_host_put(shost);
+- return err;
+-}
+-
+-static int
+-iscsi_set_host_param(struct iscsi_transport *transport,
+- struct iscsi_uevent *ev)
+-{
+- char *data = (char*)ev + sizeof(*ev);
+- struct Scsi_Host *shost;
+- int err;
+-
+- if (!transport->set_host_param)
+- return -ENOSYS;
+-
+- shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "set_host_param could not find host no %u\n",
+- ev->u.set_host_param.host_no);
+- return -ENODEV;
+- }
+-
+- err = transport->set_host_param(shost, ev->u.set_host_param.param,
+- data, ev->u.set_host_param.len);
+- scsi_host_put(shost);
+- return err;
++ return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
++ ev->u.tgt_dscvr.host_no,
++ ev->u.tgt_dscvr.enable, dst_addr);
+ }
+
+ static int
+@@ -1408,7 +967,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
++ unsigned long flags;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,35 +981,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
++ if (session) {
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
+ transport->destroy_session(session);
+- else
+- err = -EINVAL;
+- break;
+- case ISCSI_UEVENT_UNBIND_SESSION:
+- session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
+- iscsi_unbind_session(session);
+- else
++ } else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_CREATE_CONN:
+@@ -1508,11 +1049,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ case ISCSI_UEVENT_TGT_DSCVR:
+ err = iscsi_tgt_dscvr(transport, ev);
+ break;
+- case ISCSI_UEVENT_SET_HOST_PARAM:
+- err = iscsi_set_host_param(transport, ev);
+- break;
+ default:
+- err = -ENOSYS;
++ err = -EINVAL;
+ break;
+ }
+
+@@ -1521,55 +1059,70 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ }
+
+ /*
+- * Get message from skb. Each message is processed by iscsi_if_recv_msg.
+- * Malformed skbs with wrong lengths or invalid creds are not processed.
++ * Get message from skb (based on rtnetlink_rcv_skb). Each message is
++ * processed by iscsi_if_recv_msg. Malformed skbs with wrong lengths or
++ * invalid creds are discarded silently.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1130,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1601,68 +1153,43 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
+ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
+ iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
+ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+-iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+-iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
++
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
+
+ /*
+ * iSCSI session attrs
+ */
+-#define iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr_show(param) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+- \
+- if (perm && !capable(CAP_SYS_ADMIN)) \
+- return -EACCES; \
+ return t->get_session_param(session, param, buf); \
+ }
+
+-#define iscsi_session_attr(field, param, perm) \
+- iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr(field, param) \
++ iscsi_session_attr_show(param) \
+ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
+ NULL);
+
+-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+-iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+-iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+-iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+-iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+-iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+-iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+-iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+-iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+-
+-static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
+-{
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+-}
+-static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+- NULL);
++iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
++iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
++iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
++iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
++iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
++iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
++iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
++iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
++iscsi_session_attr(erl, ISCSI_PARAM_ERL);
++iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1672,32 +1199,9 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
+ NULL)
+ iscsi_priv_session_attr(recovery_tmo, "%d");
+
+-/*
+- * iSCSI host attrs
+- */
+-#define iscsi_host_attr_show(param) \
+-static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
+-{ \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
+- struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+- return priv->iscsi_transport->get_host_param(shost, param, buf); \
+-}
+-
+-#define iscsi_host_attr(field, param) \
+- iscsi_host_attr_show(param) \
+-static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+- NULL);
+-
+-iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+-iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+-iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+-iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+-
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1209,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,15 +1217,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
+- count++; \
+- } \
+-} while (0)
+-
+-#define SETUP_HOST_RD_ATTR(field, param_flag) \
+-do { \
+- if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,34 +1307,25 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
++ priv->host_attrs[0] = NULL;
+ transport_container_register(&priv->t.host_attrs);
+
+- SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+- SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+- SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+- SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+- BUG_ON(count > ISCSI_HOST_ATTRS);
+- priv->host_attrs[count] = NULL;
+- count = 0;
+-
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+ priv->conn_cont.ac.class = &iscsi_connection_class.class;
+@@ -1856,8 +1343,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
+ SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
+ SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+- SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+- SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
+
+ BUG_ON(count > ISCSI_CONN_ATTRS);
+ priv->conn_attrs[count] = NULL;
+@@ -1879,17 +1364,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
+ SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
+ SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+- SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
+- SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
+- SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
+- SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+- SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+- SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+- SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+- SETUP_PRIV_SESSION_RD_ATTR(state);
+
+ BUG_ON(count > ISCSI_SESSION_ATTRS);
+ priv->session_attrs[count] = NULL;
+@@ -1901,9 +1376,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1404,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1425,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,29 +1437,21 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+ }
+
+- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+- if (!iscsi_eh_timer_workq)
+- goto release_nls;
+-
+ return 0;
+
+-release_nls:
+- netlink_kernel_release(nls);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -1997,12 +1459,10 @@ unregister_transport_class:
+
+ static void __exit iscsi_transport_exit(void)
+ {
+- destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..55ebf03 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -48,17 +48,12 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
+
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+- ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+- ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+ ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
+ ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
+ ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
+- ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
+- ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
+ };
+
+ enum iscsi_tgt_dscvr {
+@@ -76,15 +71,7 @@ struct iscsi_uevent {
+ /* messages u -> k */
+ struct msg_create_session {
+ uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -149,11 +136,6 @@ struct iscsi_uevent {
+ */
+ uint32_t enable;
+ } tgt_dscvr;
+- struct msg_set_host_param {
+- uint32_t host_no;
+- uint32_t param; /* enum iscsi_host_param */
+- uint32_t len;
+- } set_host_param;
+ } u;
+ union {
+ /* messages k -> u */
+@@ -166,10 +148,6 @@ struct iscsi_uevent {
+ uint32_t sid;
+ uint32_t cid;
+ } c_conn_ret;
+- struct msg_unbind_session {
+- uint32_t sid;
+- uint32_t host_no;
+- } unbind_session;
+ struct msg_recv_req {
+ uint32_t sid;
+ uint32_t cid;
+@@ -245,78 +223,42 @@ enum iscsi_param {
+ ISCSI_PARAM_CONN_PORT,
+ ISCSI_PARAM_CONN_ADDRESS,
+
+- ISCSI_PARAM_USERNAME,
+- ISCSI_PARAM_USERNAME_IN,
+- ISCSI_PARAM_PASSWORD,
+- ISCSI_PARAM_PASSWORD_IN,
+-
+- ISCSI_PARAM_FAST_ABORT,
+- ISCSI_PARAM_ABORT_TMO,
+- ISCSI_PARAM_LU_RESET_TMO,
+- ISCSI_PARAM_HOST_RESET_TMO,
+-
+- ISCSI_PARAM_PING_TMO,
+- ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
+-
+-/* iSCSI HBA params */
+-enum iscsi_host_param {
+- ISCSI_HOST_PARAM_HWADDRESS,
+- ISCSI_HOST_PARAM_INITIATOR_NAME,
+- ISCSI_HOST_PARAM_NETDEV_NAME,
+- ISCSI_HOST_PARAM_IPADDRESS,
+- ISCSI_HOST_PARAM_MAX,
+-};
+-
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+@@ -330,9 +272,6 @@ enum iscsi_host_param {
+ #define CAP_MULTI_CONN 0x40
+ #define CAP_TEXT_NEGO 0x80
+ #define CAP_MARKERS 0x100
+-#define CAP_FW_DB 0x200
+-#define CAP_SENDTARGETS_OFFLOAD 0x400
+-#define CAP_DATA_PATH_OFFLOAD 0x800
+
+ /*
+ * These flags describes reason of stop_conn() call
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index f2a2c11..8d1e4e8 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -21,16 +21,13 @@
+ #ifndef ISCSI_PROTO_H
+ #define ISCSI_PROTO_H
+
+-#include <linux/types.h>
+-#include <scsi/scsi.h>
+-
+ #define ISCSI_DRAFT20_VERSION 0x00
+
+ /* default iSCSI listen port for incoming connections */
+ #define ISCSI_LISTEN_PORT 3260
+
+ /* Padding word length */
+-#define ISCSI_PAD_LEN 4
++#define PAD_WORD_LEN 4
+
+ /*
+ * useful common(control and data pathes) macro
+@@ -46,8 +43,8 @@
+ /* initiator tags; opaque for target */
+ typedef uint32_t __bitwise__ itt_t;
+ /* below makes sense only for initiator that created this tag */
+-#define build_itt(itt, age) ((__force itt_t)\
+- ((itt) | ((age) << ISCSI_AGE_SHIFT)))
++#define build_itt(itt, id, age) ((__force itt_t)\
++ ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
+ #define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
+ #define RESERVED_ITT ((__force itt_t)0xffffffff)
+
+@@ -113,7 +110,6 @@ struct iscsi_ahs_hdr {
+
+ #define ISCSI_AHSTYPE_CDB 1
+ #define ISCSI_AHSTYPE_RLENGTH 2
+-#define ISCSI_CDB_SIZE 16
+
+ /* iSCSI PDU Header */
+ struct iscsi_cmd {
+@@ -127,7 +123,7 @@ struct iscsi_cmd {
+ __be32 data_length;
+ __be32 cmdsn;
+ __be32 exp_statsn;
+- uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */
++ uint8_t cdb[16]; /* SCSI Command Block */
+ /* Additional Data (Command Dependent) */
+ };
+
+@@ -151,15 +147,6 @@ struct iscsi_rlength_ahdr {
+ __be32 read_length;
+ };
+
+-/* Extended CDB AHS */
+-struct iscsi_ecdb_ahdr {
+- __be16 ahslength; /* CDB length - 15, including reserved byte */
+- uint8_t ahstype;
+- uint8_t reserved;
+- /* 4-byte aligned extended CDB spillover */
+- uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
+-};
+-
+ /* SCSI Response Header */
+ struct iscsi_cmd_rsp {
+ uint8_t opcode;
+@@ -613,8 +600,6 @@ struct iscsi_reject {
+ #define ISCSI_MIN_MAX_BURST_LEN 512
+ #define ISCSI_MAX_MAX_BURST_LEN 16777215
+
+-#define ISCSI_DEF_TIME2WAIT 2
+-
+ /************************* RFC 3720 End *****************************/
+
+ #endif /* ISCSI_PROTO_H */
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..ea0816d 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -51,68 +48,69 @@ struct device;
+ #define debug_scsi(fmt...)
+ #endif
+
+-#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */
++#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
++#define ISCSI_CONN_MAX 1
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+
+ /* Task Mgmt states */
+-enum {
+- TMF_INITIAL,
+- TMF_QUEUED,
+- TMF_SUCCESS,
+- TMF_FAILED,
+- TMF_TIMEDOUT,
+- TMF_NOT_FOUND,
+-};
++#define TMABORT_INITIAL 0x0
++#define TMABORT_SUCCESS 0x1
++#define TMABORT_FAILED 0x2
++#define TMABORT_TIMEDOUT 0x3
++#define TMABORT_NOT_FOUND 0x4
+
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
++#define ISCSI_CID_SHIFT 12
++#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+-#define ISCSI_ADDRESS_BUF_LEN 64
+-
+-enum {
+- /* this is the maximum possible storage for AHSs */
+- ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) +
+- sizeof(struct iscsi_rlength_ahdr),
+- ISCSI_DIGEST_SIZE = sizeof(__u32),
++struct iscsi_mgmt_task {
++ /*
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++	 * that storage. It must be set up at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ int data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
+ };
+
+-
+ enum {
+ ISCSI_TASK_COMPLETED,
+ ISCSI_TASK_PENDING,
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+- * Because LLDs allocate their hdr differently, this is a pointer
+- * and length to that storage. It must be setup at session
+- * creation time.
++	 * Because LLDs allocate their hdr differently, this is a pointer to
++	 * that storage. It must be set up at session creation time.
+ */
+ struct iscsi_cmd *hdr;
+- unsigned short hdr_max;
+- unsigned short hdr_len; /* accumulated size of hdr used */
+ int itt; /* this ITT */
++ int datasn; /* DataSN */
+
+ uint32_t unsol_datasn;
+- unsigned imm_count; /* imm-data (bytes) */
+- unsigned unsol_count; /* unsolicited (bytes)*/
++ int imm_count; /* imm-data (bytes) */
++ int unsol_count; /* unsolicited (bytes)*/
+ /* offset in unsolicited stream (bytes); */
+- unsigned unsol_offset;
+- unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
++ int unsol_offset;
++ int data_count; /* remaining Data-Out */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
++ int total_length;
+ struct iscsi_conn *conn; /* used connection */
++ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
+
+ /* state set/tested under session->lock */
+ int state;
+@@ -121,33 +119,19 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
+-{
+- return (void*)task->hdr + task->hdr_len;
+-}
+-
+-/* Connection's states */
+-enum {
+- ISCSI_CONN_INITIAL_STAGE,
+- ISCSI_CONN_STARTED,
+- ISCSI_CONN_STOPPED,
+- ISCSI_CONN_CLEANUP_WAIT,
+-};
+-
+ struct iscsi_conn {
+ struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+- struct timer_list transport_timer;
+- unsigned long last_recv;
+- unsigned long last_ping;
+- int ping_timeout;
+- int recv_timeout;
+- struct iscsi_task *ping_task;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,28 +147,35 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+- struct list_head mgmtqueue; /* mgmt (control) xmit queue */
++ struct kfifo *immqueue; /* immediate xmit queue */
++ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head mgmt_run_list; /* list of control tasks */
+ struct list_head xmitqueue; /* data-path cmd queue */
+ struct list_head run_list; /* list of cmds in progress */
+- struct list_head requeue; /* tasks needing another run */
+ struct work_struct xmitwork; /* per-conn. xmit workqueue */
++ /*
++ * serializes connection xmit, access to kfifos:
++ * xmitqueue, immqueue, mgmtqueue
++ */
++ struct mutex xmitmutex;
++
+ unsigned long suspend_tx; /* suspend Tx */
+ unsigned long suspend_rx; /* suspend Rx */
+
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+- struct timer_list tmf_timer;
+- int tmf_state; /* see TMF_INITIAL, etc.*/
++ struct timer_list tmabort_timer;
++ int tmabort_state; /* see TMABORT_INITIAL, etc.*/
+
+ /* negotiated params */
+- unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
+- unsigned max_xmit_dlength; /* target_max_recv_dsl */
++ int max_recv_dlength; /* initiator_max_recv_dsl*/
++ int max_xmit_dlength; /* target_max_recv_dsl */
+ int hdrdgst_en;
+ int datadgst_en;
+ int ifmarker_en;
+@@ -192,9 +183,6 @@ struct iscsi_conn {
+ /* values userspace uses to id a conn */
+ int persistent_port;
+ char *persistent_address;
+- /* remote portal currently connected to */
+- int portal_port;
+- char portal_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,65 +197,34 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+-struct iscsi_pool {
++struct iscsi_queue {
+ struct kfifo *queue; /* FIFO Queue */
+ void **pool; /* Pool of elements */
+ int max; /* Max number of elements */
+ };
+
+-/* Session's states */
+-enum {
+- ISCSI_STATE_FREE = 1,
+- ISCSI_STATE_LOGGED_IN,
+- ISCSI_STATE_FAILED,
+- ISCSI_STATE_TERMINATE,
+- ISCSI_STATE_IN_RECOVERY,
+- ISCSI_STATE_RECOVERY_FAILED,
+- ISCSI_STATE_LOGGING_OUT,
+-};
+-
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+- /*
+- * Syncs up the scsi eh thread with the iscsi eh thread when sending
+- * task management functions. This must be taken before the session
+- * and recv lock.
+- */
+- struct mutex eh_mutex;
+-
+ /* iSCSI session-wide sequencing */
+ uint32_t cmdsn;
+ uint32_t exp_cmdsn;
+ uint32_t max_cmdsn;
+
+- /* This tracks the reqs queued into the initiator */
+- uint32_t queued_cmdsn;
+-
+ /* configuration */
+- int abort_timeout;
+- int lu_reset_timeout;
+ int initial_r2t_en;
+- unsigned max_r2t;
++ int max_r2t;
+ int imm_data_en;
+- unsigned first_burst;
+- unsigned max_burst;
++ int first_burst;
++ int max_burst;
+ int time2wait;
+ int time2retain;
+ int pdu_inorder_en;
+ int dataseq_inorder_en;
+ int erl;
+- int fast_abort;
+ int tpgt;
+- char *username;
+- char *username_in;
+- char *password;
+- char *password_in;
+ char *targetname;
+- char *ifacename;
+- char *initiatorname;
++
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +238,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
+- struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
++ struct iscsi_queue cmdpool; /* PDU's pool */
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -303,48 +252,31 @@ struct iscsi_host {
+ extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
+ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
+ extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
+-extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
+ /*
+- * iSCSI host helpers.
+- */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+-extern int iscsi_host_set_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+-extern int iscsi_host_get_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+-
+-/*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
+-#define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,17 +285,13 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+-
+-#define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+-extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern int iscsi_check_assign_cmdsn(struct iscsi_session *,
++ struct iscsi_nopin *);
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+@@ -371,34 +299,13 @@ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
+
+ /*
+ * generic helpers
+ */
+-extern void iscsi_pool_free(struct iscsi_pool *);
+-extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+-
+-/*
+- * inline functions to deal with padding.
+- */
+-static inline unsigned int
+-iscsi_padded(unsigned int len)
+-{
+- return (len + ISCSI_PAD_LEN - 1) & ~(ISCSI_PAD_LEN - 1);
+-}
+-
+-static inline unsigned int
+-iscsi_padding(unsigned int len)
+-{
+- len &= (ISCSI_PAD_LEN - 1);
+- if (len)
+- len = ISCSI_PAD_LEN - len;
+- return len;
+-}
++extern void iscsi_pool_free(struct iscsi_queue *, void **);
++extern int iscsi_pool_init(struct iscsi_queue *, int, void ***, int);
+
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..d5c218d 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -24,17 +24,15 @@
+ #define SCSI_TRANSPORT_ISCSI_H
+
+ #include <linux/device.h>
+-#include <linux/list.h>
+-#include <linux/mutex.h>
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +56,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize an iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize an iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -84,11 +79,17 @@ struct iscsi_transport {
+ char *name;
+ unsigned int caps;
+ /* LLD sets this to indicate what values it can export to sysfs */
+- uint64_t param_mask;
+- uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ unsigned int param_mask;
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -104,25 +105,26 @@ struct iscsi_transport {
+ enum iscsi_param param, char *buf);
+ int (*get_session_param) (struct iscsi_cls_session *session,
+ enum iscsi_param param, char *buf);
+- int (*get_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+- int (*set_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+ int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
+- int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
++ int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+
+@@ -139,6 +141,13 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
+ extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+
++
++/* Connection's states */
++#define ISCSI_CONN_INITIAL_STAGE 0
++#define ISCSI_CONN_STARTED 1
++#define ISCSI_CONN_STOPPED 2
++#define ISCSI_CONN_CLEANUP_WAIT 3
++
+ struct iscsi_cls_conn {
+ struct list_head conn_list; /* item in connlist */
+ void *dd_data; /* LLD private data */
+@@ -152,34 +161,25 @@ struct iscsi_cls_conn {
+ #define iscsi_dev_to_conn(_dev) \
+ container_of(_dev, struct iscsi_cls_conn, dev)
+
+-#define iscsi_conn_to_session(_conn) \
+- iscsi_dev_to_session(_conn->dev.parent)
+-
+-/* iscsi class session state */
+-enum {
+- ISCSI_SESSION_LOGGED_IN,
+- ISCSI_SESSION_FAILED,
+- ISCSI_SESSION_FREE,
+-};
+-
+-#define ISCSI_MAX_TARGET -1
++/* Session's states */
++#define ISCSI_STATE_FREE 1
++#define ISCSI_STATE_LOGGED_IN 2
++#define ISCSI_STATE_FAILED 3
++#define ISCSI_STATE_TERMINATE 4
++#define ISCSI_STATE_IN_RECOVERY 5
++#define ISCSI_STATE_RECOVERY_FAILED 6
+
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+- spinlock_t lock;
+- struct work_struct block_work;
+- struct work_struct unblock_work;
+- struct work_struct scan_work;
+- struct work_struct unbind_work;
+
+ /* recovery fields */
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+- int state;
+ int sid; /* session id */
+ void *dd_data; /* LLD private data */
+ struct device dev; /* sysfs transport/container device */
+@@ -194,53 +194,31 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
+- atomic_t nr_scans;
++struct iscsi_host {
++ struct list_head sessions;
+ struct mutex mutex;
+- struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
+ };
+
+ /*
+ * session and connection functions that can be used by HW iSCSI LLDs
+ */
+-#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
+- dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
+-
+-#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
+- dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
+-
+-extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+-extern int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event);
++extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
++extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
++
+
+ #endif
+--
+1.5.3.8
+
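For reference while reviewing the hunks above: the restored build_itt(itt, id, age) packs the connection id back into the initiator task tag (bits 12-27), keeps the age in bits 28-31 and leaves the tag proper in bits 0-11, while the newer code being removed drops the cid field and widens ISCSI_ITT_MASK to 0x1fff. The short standalone sketch below only illustrates that bit layout, reusing the constants from the libiscsi.h hunk; the demo_* helpers, the itt_demo.c file name and the printf harness are illustrative and not part of the patch.

/*
 * Standalone sketch of the old itt layout restored by the hunks above:
 *   tag = itt | (cid << ISCSI_CID_SHIFT) | (age << ISCSI_AGE_SHIFT)
 * Constants are taken from the libiscsi.h hunk (U suffixes added only to
 * keep the shifts unsigned in a userspace build).
 * Build: cc -o itt_demo itt_demo.c
 */
#include <stdint.h>
#include <stdio.h>

#define ISCSI_ITT_MASK   (0xfff)
#define ISCSI_CID_SHIFT  12
#define ISCSI_CID_MASK   (0xffffU << ISCSI_CID_SHIFT)
#define ISCSI_AGE_SHIFT  28
#define ISCSI_AGE_MASK   (0xfU << ISCSI_AGE_SHIFT)

/* mirrors build_itt(itt, id, age) from the hunk, minus the __bitwise__ annotation */
static uint32_t demo_build_itt(uint32_t itt, uint32_t cid, uint32_t age)
{
	return itt | (cid << ISCSI_CID_SHIFT) | (age << ISCSI_AGE_SHIFT);
}

static uint32_t demo_get_itt(uint32_t tag)
{
	return tag & ISCSI_ITT_MASK;	/* same masking as get_itt() */
}

int main(void)
{
	uint32_t tag = demo_build_itt(0x2a, 3, 1);

	printf("tag=0x%08x itt=%u cid=%u age=%u\n",
	       (unsigned)tag, (unsigned)demo_get_itt(tag),
	       (unsigned)((tag & ISCSI_CID_MASK) >> ISCSI_CID_SHIFT),
	       (unsigned)((tag & ISCSI_AGE_MASK) >> ISCSI_AGE_SHIFT));
	return 0;
}

Compiled as shown, it prints "tag=0x1000302a itt=42 cid=3 age=1", which matches how the decoding macros above pull the three fields back apart.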
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_02_add_to_2_6_9.patch b/kernel_patches/backport/2.6.9_U5/iscsi_02_add_to_2_6_9.patch
new file mode 100644
index 0000000..1f05d95
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_02_add_to_2_6_9.patch
@@ -0,0 +1,180 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 4376840..11dfaf9 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2145,7 +2145,6 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ static struct scsi_host_template iscsi_sht = {
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index d37048c..60f5846 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1366,7 +1366,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ shost->max_lun = iscsit->max_lun;
+ shost->max_cmd_len = iscsit->max_cmd_len;
+ shost->transportt = scsit;
+- shost->transportt->create_work_queue = 1;
+ *hostno = shost->host_no;
+
+ session = iscsi_hostdata(shost->hostdata);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 8133c22..f1c68f7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -65,6 +65,8 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define cdev_to_iscsi_internal(_cdev) \
+ container_of(_cdev, struct iscsi_internal, cdev)
+
++extern int attribute_container_init(void);
++
+ static void iscsi_transport_release(struct class_device *cdev)
+ {
+ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+@@ -80,6 +82,17 @@ static struct class iscsi_transport_class = {
+ .release = iscsi_transport_release,
+ };
+
++static void iscsi_host_class_release(struct class_device *class_dev)
++{
++ struct Scsi_Host *shost = transport_class_to_shost(class_dev);
++ put_device(&shost->shost_gendev);
++}
++
++struct class iscsi_host_class = {
++ .name = "iscsi_host",
++ .release = iscsi_host_class_release,
++};
++
+ static ssize_t
+ show_transport_handle(struct class_device *cdev, char *buf)
+ {
+@@ -115,10 +128,8 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct class_device *cdev)
++static int iscsi_setup_host(struct Scsi_Host *shost)
+ {
+- struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+@@ -127,12 +138,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ return 0;
+ }
+
+-static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+- "iscsi_host",
+- iscsi_setup_host,
+- NULL,
+- NULL);
+-
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+ "iscsi_session",
+ NULL,
+@@ -216,24 +221,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_host *ihost = shost->shost_data;
+- struct iscsi_cls_session *session;
+-
+- mutex_lock(&ihost->mutex);
+- list_for_each_entry(session, &ihost->sessions, host_list) {
+- if ((channel == SCAN_WILD_CARD || channel == 0) &&
+- (id == SCAN_WILD_CARD || id == session->target_id))
+- scsi_scan_target(&session->dev, 0,
+- session->target_id, lun, 1);
+- }
+- mutex_unlock(&ihost->mutex);
+-
+- return 0;
+-}
+-
+ static void session_recovery_timedout(struct work_struct *work)
+ {
+ struct iscsi_cls_session *session =
+@@ -362,8 +349,6 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ list_del(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+- scsi_remove_target(&session->dev);
+-
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+ }
+@@ -1269,24 +1254,6 @@ static int iscsi_conn_match(struct attribute_container *cont,
+ return &priv->conn_cont.ac == cont;
+ }
+
+-static int iscsi_host_match(struct attribute_container *cont,
+- struct device *dev)
+-{
+- struct Scsi_Host *shost;
+- struct iscsi_internal *priv;
+-
+- if (!scsi_is_host_device(dev))
+- return 0;
+-
+- shost = dev_to_shost(dev);
+- if (!shost->transportt ||
+- shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
+- return 0;
+-
+- priv = to_iscsi_internal(shost->transportt);
+- return &priv->t.host_attrs.ac == cont;
+-}
+-
+ struct scsi_transport_template *
+ iscsi_register_transport(struct iscsi_transport *tt)
+ {
+@@ -1306,7 +1273,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ INIT_LIST_HEAD(&priv->list);
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+- priv->t.user_scan = iscsi_user_scan;
+
+ priv->cdev.class = &iscsi_transport_class;
+ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
+@@ -1319,12 +1285,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ goto unregister_cdev;
+
+ /* host parameters */
+- priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+- priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+- priv->t.host_attrs.ac.match = iscsi_host_match;
++ priv->t.host_attrs = &priv->host_attrs[0];
++ priv->t.host_class = &iscsi_host_class;
++ priv->t.host_setup = iscsi_setup_host;
+ priv->t.host_size = sizeof(struct iscsi_host);
+- priv->host_attrs[0] = NULL;
+- transport_container_register(&priv->t.host_attrs);
+
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+@@ -1402,7 +1366,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+
+ transport_container_unregister(&priv->conn_cont);
+ transport_container_unregister(&priv->session_cont);
+- transport_container_unregister(&priv->t.host_attrs);
+
+ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
+ class_device_unregister(&priv->cdev);
+@@ -1420,6 +1420,7 @@ static __init int iscsi_transport_init(void)
+ ISCSI_TRANSPORT_VERSION);
+
+ atomic_set(&iscsi_session_nr, 0);
++ attribute_container_init();
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+ return err;
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_03_add_session_wq.patch b/kernel_patches/backport/2.6.9_U5/iscsi_03_add_session_wq.patch
new file mode 100644
index 0000000..5a77c07
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_03_add_session_wq.patch
@@ -0,0 +1,76 @@
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a6f2303..5d62cc0 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -612,7 +612,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (resume_tx) {
+ iser_dbg("%ld resuming tx\n",jiffies);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ queue_work(conn->session->wq, &conn->xmitwork);
+ }
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index e8020a5..43e9128 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -828,7 +828,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+
+- scsi_queue_work(host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+
+ reject:
+@@ -928,7 +928,7 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ else
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+
+- scsi_queue_work(session->host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+ }
+
+@@ -1415,6 +1415,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ INIT_LIST_HEAD(&mtask->running);
+ }
+
++ session->wq = create_singlethread_workqueue("");
++ BUG_ON(!session->wq);
++
+ if (scsi_add_host(shost, NULL))
+ goto add_host_fail;
+
+@@ -1462,6 +1465,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+
+ kfree(session->targetname);
+
++ destroy_workqueue(session->wq);
++
+ iscsi_destroy_session(cls_session);
+ scsi_host_put(shost);
+ module_put(owner);
+@@ -1595,7 +1600,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- scsi_flush_work(session->host);
++ flush_workqueue(session->wq);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..e8a95f5 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -244,6 +244,8 @@ struct iscsi_session {
+ int mgmtpool_max; /* size of mgmt array */
+ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
+ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
++
++ struct workqueue_struct *wq;
+ };
+
+ /*
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_04_inet_sock_to_opt.patch b/kernel_patches/backport/2.6.9_U5/iscsi_04_inet_sock_to_opt.patch
new file mode 100644
index 0000000..1fb2376
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_04_inet_sock_to_opt.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 905efc4..f73a743 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2027,7 +2027,7 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct inet_sock *inet;
++ struct inet_opt *inet;
+ struct ipv6_pinfo *np;
+ struct sock *sk;
+ int len;
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_05_release_host_lock_before_eh.patch b/kernel_patches/backport/2.6.9_U5/iscsi_05_release_host_lock_before_eh.patch
new file mode 100644
index 0000000..c994506
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_05_release_host_lock_before_eh.patch
@@ -0,0 +1,60 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 7db081b..211944e 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -968,12 +968,14 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn = session->leadconn;
+ int fail_session = 0;
+
++ spin_unlock_irq(host->host_lock);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+ return FAILED;
+ }
+
+@@ -1005,6 +1007,7 @@ failed:
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+
+ return SUCCESS;
+ }
+@@ -1162,13 +1165,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn;
+ struct iscsi_session *session;
+ int rc;
++ struct Scsi_Host *shost = sc->device->host;
+
++ spin_unlock_irq(shost->host_lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+ }
+
+@@ -1253,6 +1259,7 @@ success_cleanup:
+
+ success_rel_mutex:
+ mutex_unlock(&conn->xmitmutex);
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+
+ failed:
+@@ -1260,6 +1267,7 @@ failed:
+ mutex_unlock(&conn->xmitmutex);
+
+ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ spin_lock_irq(shost->host_lock);
+ return FAILED;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_abort);
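The hunks above assume the 2.6.9-era midlayer still enters eh_abort/eh_host_reset with host->host_lock held and interrupts off, so the backport drops that lock before the iSCSI error-handling code sleeps (session lock, xmitmutex) and re-takes it on every exit path, including the early-failure returns. Below is a rough userspace analogue of that drop-and-reacquire shape, with pthread mutexes standing in for the spinlocks; every name and the build line are illustrative only.

/*
 * Userspace analogue of the pattern above: a handler entered with the
 * caller's lock held must drop it before doing work that can block,
 * then re-take it before returning so the caller's unlock still balances.
 * Build: cc -o eh_demo eh_demo.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

/* called with host_lock held, the way the old midlayer calls eh_abort */
static int demo_eh_abort(void)
{
	pthread_mutex_unlock(&host_lock);	/* like spin_unlock_irq(host->host_lock) */

	pthread_mutex_lock(&session_lock);	/* work that may sleep */
	puts("aborting command");
	pthread_mutex_unlock(&session_lock);

	pthread_mutex_lock(&host_lock);		/* like spin_lock_irq() before returning */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&host_lock);		/* the "midlayer" enters with the lock held */
	demo_eh_abort();
	pthread_mutex_unlock(&host_lock);	/* and its own unlock still balances */
	return 0;
}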
diff --git a/kernel_patches/backport/2.6.9_U5/iscsi_06_scsi_addons.patch b/kernel_patches/backport/2.6.9_U5/iscsi_06_scsi_addons.patch
new file mode 100644
index 0000000..a114696
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iscsi_06_scsi_addons.patch
@@ -0,0 +1,75 @@
+diff --git a/drivers/scsi/init.c b/drivers/scsi/init.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/init.c
+@@ -0,0 +1 @@
++#include "src/init.c"
+diff --git a/drivers/scsi/attribute_container.c b/drivers/scsi/attribute_container.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/attribute_container.c
+@@ -0,0 +1 @@
++#include "../drivers/base/attribute_container.c"
+diff --git a/drivers/scsi/transport_class.c b/drivers/scsi/transport_class.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/transport_class.c
+@@ -0,0 +1 @@
++#include "../drivers/base/transport_class.c"
+diff --git a/drivers/scsi/klist.c b/drivers/scsi/klist.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/klist.c
+@@ -0,0 +1 @@
++#include "../../lib/klist.c"
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi.c
+@@ -0,0 +1 @@
++#include "src/scsi.c"
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_lib.c
+@@ -0,0 +1 @@
++#include "src/scsi_lib.c"
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_scan.c
+@@ -0,0 +1 @@
++#include "src/scsi_scan.c"
+diff --git a/drivers/scsi/libiscsi_f.c b/drivers/scsi/libiscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/libiscsi_f.c
+@@ -0,0 +1 @@
++#include "libiscsi.c"
+diff --git a/drivers/scsi/scsi_transport_iscsi_f.c b/drivers/scsi/scsi_transport_iscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_transport_iscsi_f.c
+@@ -0,0 +1 @@
++#include "scsi_transport_iscsi.c"
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index e212608..3bf2015 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -3,2 +3,7 @@
+ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
+ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
++
++CFLAGS_attribute_container.o = $(BACKPORT_INCLUDES)/src/
++
++scsi_transport_iscsi-y := scsi_transport_iscsi_f.o scsi.o scsi_lib.o init.o klist.o attribute_container.o transport_class.o
++libiscsi-y := libiscsi_f.o scsi_scan.o
diff --git a/kernel_patches/backport/2.6.9_U5/iser_00_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.9_U5/iser_00_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..ff5d719
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_00_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From 12b757e92112750b4bc90cf8150d20484d684dcf Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 21 Aug 2008 14:28:56 +0300
+Subject: [PATCH] iser_sync_kernel_code_with_2.6.26
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field that needs to be initialized */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch b/kernel_patches/backport/2.6.9_U5/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
new file mode 100644
index 0000000..101fdc6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
@@ -0,0 +1,44 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..75ecabe 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -211,10 +211,10 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ int error = 0;
+
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(ctask->sc) == 0);
++ BUG_ON(ctask->sc->request_bufflen == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->itt, ctask->sc->request_bufflen,
+ ctask->imm_count, ctask->unsol_count);
+ }
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 5d62cc0..1ae80d8 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -349,12 +349,18 @@ int iser_send_command(struct iscsi_conn *conn,
+ else
+ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+- if (scsi_sg_count(sc)) { /* using a scatter list */
+- data_buf->buf = scsi_sglist(sc);
+- data_buf->size = scsi_sg_count(sc);
++ if (sc->use_sg) { /* using a scatter list */
++ data_buf->buf = sc->request_buffer;
++ data_buf->size = sc->use_sg;
++ } else if (sc->request_bufflen) {
++ /* using a single buffer - convert it into one entry SG */
++ sg_init_one(&data_buf->sg_single,
++ sc->request_buffer, sc->request_bufflen);
++ data_buf->buf = &data_buf->sg_single;
++ data_buf->size = 1;
+ }
+
+- data_buf->data_len = scsi_bufflen(sc);
++ data_buf->data_len = sc->request_bufflen;
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/kernel_patches/backport/2.6.9_U5/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch b/kernel_patches/backport/2.6.9_U5/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
new file mode 100644
index 0000000..7b21cba
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..933429b 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -586,7 +586,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
diff --git a/kernel_patches/backport/2.6.9_U5/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch b/kernel_patches/backport/2.6.9_U5/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
new file mode 100644
index 0000000..d72eb5a
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
@@ -0,0 +1,74 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..7baac99 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -368,8 +368,7 @@ static struct iscsi_transport iscsi_iser_transport;
+ static struct iscsi_cls_session *
+ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct scsi_transport_template *scsit,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+@@ -380,13 +380,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iser_desc *desc;
+
+- /*
+- * we do not support setting can_queue cmd_per_lun from userspace yet
+- * because we preallocate so many resources
+- */
+ cls_session = iscsi_session_setup(iscsit, scsit,
+- ISCSI_DEF_XMIT_CMDS_MAX,
+- ISCSI_MAX_CMD_PER_LUN,
+ sizeof(struct iscsi_iser_cmd_task),
+ sizeof(struct iser_desc),
+ initial_cmdsn, &hn);
+@@ -550,7 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 1ee867b..671faff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -105,7 +105,7 @@
+ #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
++#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+ ISER_MAX_RX_MISC_PDUS + \
+ ISER_MAX_TX_MISC_PDUS)
+
+@@ -117,7 +117,7 @@
+
+ #define ISER_INFLIGHT_DATAOUTS 8
+
+-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
++#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 654a4dc..f3d8ba5 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -154,8 +154,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ /* make the pool size twice the max number of SCSI commands *
+ * the ML is expected to queue, watermark for unmap at 50% */
+- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
++ params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
++ params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.cache = 0;
+ params.flush_function = NULL;
+ params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/kernel_patches/backport/2.6.9_U5/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch b/kernel_patches/backport/2.6.9_U5/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
new file mode 100644
index 0000000..26fa09c
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
@@ -0,0 +1,38 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 8f7b859..5f82d6c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -134,9 +134,18 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iser_ctask->command_sent = 0;
+ iser_ctask->iser_conn = iser_conn;
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(sc->request_bufflen == 0);
++
++ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
++ ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->unsol_count);
++ }
++
+ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+@@ -210,14 +219,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(ctask->sc->request_bufflen == 0);
+-
+- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, ctask->sc->request_bufflen,
+- ctask->imm_count, ctask->unsol_count);
+- }
+-
+ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ conn->id, ctask->itt);
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch b/kernel_patches/backport/2.6.9_U5/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
new file mode 100644
index 0000000..417415f
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
@@ -0,0 +1,18 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5f82d6c..3a67d76 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -574,11 +574,8 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
diff --git a/kernel_patches/backport/2.6.9_U5/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch b/kernel_patches/backport/2.6.9_U5/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
new file mode 100644
index 0000000..0b1a4c4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index c5941fa..2f4f125 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -140,8 +140,8 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ iser_ctask->iser_conn = iser_conn;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(sc->request_bufflen == 0);
++ BUG_ON(ctask->total_length == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->itt, ctask->total_length, ctask->imm_count,
+ ctask->unsol_count);
+ }
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch b/kernel_patches/backport/2.6.9_U5/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
new file mode 100644
index 0000000..f207af3
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
@@ -0,0 +1,14 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 2f4f125..940bf98 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,8 +576,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_INITIATOR_NAME,
++ .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
diff --git a/kernel_patches/backport/2.6.9_U5/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch b/kernel_patches/backport/2.6.9_U5/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
new file mode 100644
index 0000000..f9dceb1
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
@@ -0,0 +1,22 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 940bf98..6a35eff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,7 +576,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
+@@ -593,9 +592,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
diff --git a/kernel_patches/backport/2.6.9_U5/iser_09_fix_inclusion_order.patch b/kernel_patches/backport/2.6.9_U5/iser_09_fix_inclusion_order.patch
new file mode 100644
index 0000000..3c2a969
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_09_fix_inclusion_order.patch
@@ -0,0 +1,13 @@
+--- linux-2.6.20-rc7-orig/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:13:43.000000000 +0200
++++ linux-2.6.20-rc7/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:14:31.000000000 +0200
+@@ -70,9 +70,8 @@
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+-#include <scsi/scsi_transport_iscsi.h>
+-
+ #include "iscsi_iser.h"
++#include <scsi/scsi_transport_iscsi.h>
+
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
diff --git a/kernel_patches/backport/2.6.9_U5/iser_10_fix_struct_scsi_host_template.patch b/kernel_patches/backport/2.6.9_U5/iser_10_fix_struct_scsi_host_template.patch
new file mode 100644
index 0000000..5b28ac4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_10_fix_struct_scsi_host_template.patch
@@ -0,0 +1,31 @@
+From 828e0ad429b92cf75781770ceb9ef7086f34fde2 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:31:42 +0300
+Subject: [PATCH] fix_struct_scsi_host_template
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 9bf24c6..de1e783 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -542,13 +542,11 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "iscsi_iser",
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_11_add_fmr_unalign_cnt.patch b/kernel_patches/backport/2.6.9_U5/iser_11_add_fmr_unalign_cnt.patch
new file mode 100644
index 0000000..ef2a2d6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_11_add_fmr_unalign_cnt.patch
@@ -0,0 +1,25 @@
+From 1255c8e5209ce19644e83e353c260f2eddc62cca Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:54:57 +0300
+Subject: [PATCH] add fmr_unalign_cnt to struct iscsi_conn
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/libiscsi.h | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..182421f 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -197,6 +197,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_queue {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_12_remove_hdr_max.patch b/kernel_patches/backport/2.6.9_U5/iser_12_remove_hdr_max.patch
new file mode 100644
index 0000000..c475001
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_12_remove_hdr_max.patch
@@ -0,0 +1,25 @@
+From 97672ef8a29da5e16774d1de9527b2cc29415e36 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:59:16 +0300
+Subject: [PATCH] remove hdr_max
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index de1e783..6451e9d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -394,7 +394,6 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ ctask = session->cmds[i];
+ iser_ctask = ctask->dd_data;
+ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+- ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+
+ for (i = 0; i < session->mgmtpool_max; i++) {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_13_fix_netlink_kernel_create.patch b/kernel_patches/backport/2.6.9_U5/iser_13_fix_netlink_kernel_create.patch
new file mode 100644
index 0000000..d47df44
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_13_fix_netlink_kernel_create.patch
@@ -0,0 +1,26 @@
+From db61fe2c3062d8918e793ddc7e1a8cc3694bf620 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:20:42 +0300
+Subject: [PATCH] fix netlink_kernel_create
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e969ef7..a2f4fb7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1401,7 +1401,7 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ nls = netlink_kernel_create(NULL, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
+ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_14_sync_attribute_container.c_from_ofed1.3.patch b/kernel_patches/backport/2.6.9_U5/iser_14_sync_attribute_container.c_from_ofed1.3.patch
new file mode 100644
index 0000000..e926007
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_14_sync_attribute_container.c_from_ofed1.3.patch
@@ -0,0 +1,394 @@
+From bed65721f623039a119b5ff03c6c1fe44a1ccfb3 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:26:20 +0300
+Subject: [PATCH] sync attribute_container.c from ofed1.3
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/base/attribute_container.c | 100 +++++++++++++++++------------------
+ drivers/base/transport_class.c | 21 ++++----
+ 2 files changed, 60 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index f57652d..7370d7c 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -27,21 +27,21 @@
+ struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+- struct device classdev;
++ struct class_device classdev;
+ };
+
+ static void internal_container_klist_get(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- get_device(&ic->classdev);
++ class_device_get(&ic->classdev);
+ }
+
+ static void internal_container_klist_put(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- put_device(&ic->classdev);
++ class_device_put(&ic->classdev);
+ }
+
+
+@@ -53,7 +53,7 @@ static void internal_container_klist_put(struct klist_node *n)
+ * Returns the container associated with this classdev.
+ */
+ struct attribute_container *
+-attribute_container_classdev_to_container(struct device *classdev)
++attribute_container_classdev_to_container(struct class_device *classdev)
+ {
+ struct internal_container *ic =
+ container_of(classdev, struct internal_container, classdev);
+@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct device *classdev)
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+-static LIST_HEAD(attribute_container_list);
++static struct list_head attribute_container_list;
+
+ static DEFINE_MUTEX(attribute_container_mutex);
+
+@@ -110,11 +110,11 @@ attribute_container_unregister(struct attribute_container *cont)
+ EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+ /* private function used as class release */
+-static void attribute_container_release(struct device *classdev)
++static void attribute_container_release(struct class_device *classdev)
+ {
+ struct internal_container *ic
+ = container_of(classdev, struct internal_container, classdev);
+- struct device *dev = classdev->parent;
++ struct device *dev = classdev->dev;
+
+ kfree(ic);
+ put_device(dev);
+@@ -129,12 +129,12 @@ static void attribute_container_release(struct device *classdev)
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container). If no
+ * fn is provided, the code will simply register the class device via
+- * device_add. If a function is provided, it is expected to add
++ * class_device_add. If a function is provided, it is expected to add
+ * the class device at the appropriate time. One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it a later time. To do this, call this routine for
+ * allocation and initialisation and then use
+- * attribute_container_device_trigger() to call device_add() on
++ * attribute_container_device_trigger() to call class_device_add() on
+ * it. Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+@@ -142,7 +142,7 @@ void
+ attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -163,11 +163,11 @@ attribute_container_add_device(struct device *dev,
+ }
+
+ ic->cont = cont;
+- device_initialize(&ic->classdev);
+- ic->classdev.parent = get_device(dev);
++ class_device_initialize(&ic->classdev);
++ ic->classdev.dev = get_device(dev);
+ ic->classdev.class = cont->class;
+- cont->class->dev_release = attribute_container_release;
+- strcpy(ic->classdev.bus_id, dev->bus_id);
++ cont->class->release = attribute_container_release;
++ strcpy(ic->classdev.class_id, dev->bus_id);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+@@ -195,19 +195,20 @@ attribute_container_add_device(struct device *dev,
+ * @fn: A function to call to remove the device
+ *
+ * This routine triggers device removal. If fn is NULL, then it is
+- * simply done via device_unregister (note that if something
++ * simply done via class_device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released). If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+- * device_del() and then use attribute_container_device_trigger()
+- * to do the final put on the classdev.
++ * class_device_del() and then use
++ * attribute_container_device_trigger() to do the final put on the
++ * classdev.
+ */
+ void
+ attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -223,14 +224,14 @@ attribute_container_remove_device(struct device *dev,
+ continue;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev != ic->classdev.parent)
++ if (dev != ic->classdev.dev)
+ continue;
+ klist_del(&ic->node);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else {
+ attribute_container_remove_attrs(&ic->classdev);
+- device_unregister(&ic->classdev);
++ class_device_unregister(&ic->classdev);
+ }
+ }
+ }
+@@ -251,7 +252,7 @@ void
+ attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -269,7 +270,7 @@ attribute_container_device_trigger(struct device *dev,
+ }
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev == ic->classdev.parent)
++ if (dev == ic->classdev.dev)
+ fn(cont, dev, &ic->classdev);
+ }
+ }
+@@ -312,23 +313,18 @@ attribute_container_trigger(struct device *dev,
+ * attributes listed in the container
+ */
+ int
+-attribute_container_add_attrs(struct device *classdev)
++attribute_container_add_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i, error;
+
+- BUG_ON(attrs && cont->grp);
+-
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return 0;
+
+- if (cont->grp)
+- return sysfs_create_group(&classdev->kobj, cont->grp);
+-
+ for (i = 0; attrs[i]; i++) {
+- error = device_create_file(classdev, attrs[i]);
++ error = class_device_create_file(classdev, attrs[i]);
+ if (error)
+ return error;
+ }
+@@ -337,18 +333,18 @@ attribute_container_add_attrs(struct device *classdev)
+ }
+
+ /**
+- * attribute_container_add_class_device - same function as device_add
++ * attribute_container_add_class_device - same function as class_device_add
+ *
+ * @classdev: the class device to add
+ *
+- * This performs essentially the same function as device_add except for
++ * This performs essentially the same function as class_device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+ int
+-attribute_container_add_class_device(struct device *classdev)
++attribute_container_add_class_device(struct class_device *classdev)
+ {
+- int error = device_add(classdev);
++ int error = class_device_add(classdev);
+ if (error)
+ return error;
+ return attribute_container_add_attrs(classdev);
+@@ -363,7 +359,7 @@ attribute_container_add_class_device(struct device *classdev)
+ int
+ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ return attribute_container_add_class_device(classdev);
+ }
+@@ -375,23 +371,18 @@ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ *
+ */
+ void
+-attribute_container_remove_attrs(struct device *classdev)
++attribute_container_remove_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i;
+
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return;
+
+- if (cont->grp) {
+- sysfs_remove_group(&classdev->kobj, cont->grp);
+- return ;
+- }
+-
+ for (i = 0; attrs[i]; i++)
+- device_remove_file(classdev, attrs[i]);
++ class_device_remove_file(classdev, attrs[i]);
+ }
+
+ /**
+@@ -400,13 +391,13 @@ attribute_container_remove_attrs(struct device *classdev)
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+- * device_del.
++ * class_device_del.
+ */
+ void
+-attribute_container_class_device_del(struct device *classdev)
++attribute_container_class_device_del(struct class_device *classdev)
+ {
+ attribute_container_remove_attrs(classdev);
+- device_del(classdev);
++ class_device_del(classdev);
+ }
+
+ /**
+@@ -418,16 +409,16 @@ attribute_container_class_device_del(struct device *classdev)
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+-struct device *
++struct class_device *
+ attribute_container_find_class_device(struct attribute_container *cont,
+ struct device *dev)
+ {
+- struct device *cdev = NULL;
++ struct class_device *cdev = NULL;
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (ic->classdev.parent == dev) {
++ if (ic->classdev.dev == dev) {
+ cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
+@@ -438,3 +429,10 @@ attribute_container_find_class_device(struct attribute_container *cont,
+ return cdev;
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
++
++int __init
++attribute_container_init(void)
++{
++ INIT_LIST_HEAD(&attribute_container_list);
++ return 0;
++}
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
+index 84997ef..f25e7c6 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+ static int anon_transport_dummy_function(struct transport_container *tc,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ /* do nothing */
+ return 0;
+@@ -108,14 +108,13 @@ EXPORT_SYMBOL_GPL(anon_transport_class_register);
+ */
+ void anon_transport_class_unregister(struct anon_transport_class *atc)
+ {
+- if (unlikely(attribute_container_unregister(&atc->container)))
+- BUG();
++ attribute_container_unregister(&atc->container);
+ }
+ EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+ static int transport_setup_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -127,7 +126,9 @@ static int transport_setup_classdev(struct attribute_container *cont,
+ }
+
+ /**
+- * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
++ * transport_setup_device - declare a new dev for transport class association
++ * but don't make it visible yet.
++ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+@@ -149,7 +150,7 @@ EXPORT_SYMBOL_GPL(transport_setup_device);
+
+ static int transport_add_class_device(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ int error = attribute_container_add_class_device(classdev);
+ struct transport_container *tcont =
+@@ -181,7 +182,7 @@ EXPORT_SYMBOL_GPL(transport_add_device);
+
+ static int transport_configure(struct attribute_container *cont,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(transport_configure_device);
+
+ static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+@@ -251,12 +252,12 @@ EXPORT_SYMBOL_GPL(transport_remove_device);
+
+ static void transport_destroy_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove != anon_transport_dummy_function)
+- put_device(classdev);
++ class_device_put(classdev);
+ }
+
+
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U5/iser_15_fix_iscsi_free_mgmt_task.patch b/kernel_patches/backport/2.6.9_U5/iser_15_fix_iscsi_free_mgmt_task.patch
new file mode 100644
index 0000000..7a3a3ea
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U5/iser_15_fix_iscsi_free_mgmt_task.patch
@@ -0,0 +1,28 @@
+From 5a9fd2300982aca58f1306bdb98cab878998a607 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:53:59 +0300
+Subject: [PATCH] fix iscsi_free_mgmt_task
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iser_initiator.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 4e20c8b..e7f2399 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -627,7 +627,9 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&conn->session->lock);
+- iscsi_free_mgmt_task(conn, mtask);
++ list_del(&mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ spin_unlock(&session->lock);
+ }
+ }
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch b/kernel_patches/backport/2.6.9_U6/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
new file mode 100644
index 0000000..e35b289
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
@@ -0,0 +1,9402 @@
+From f75042cdafb7f42cd1f9a244872ae2f7896e3278 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Wed, 20 Aug 2008 14:32:54 +0300
+Subject: [PATCH 1/1] iscsi_01_sync_kernel_code_with_ofed_1_2_5
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 2537 +++++++++++++++++++----------------
+ drivers/scsi/iscsi_tcp.h | 136 ++-
+ drivers/scsi/libiscsi.c | 2501 ++++++++++++----------------------
+ drivers/scsi/scsi_transport_iscsi.c | 1208 +++++------------
+ include/scsi/iscsi_if.h | 119 +--
+ include/scsi/iscsi_proto.h | 23 +-
+ include/scsi/libiscsi.h | 247 ++---
+ include/scsi/scsi_transport_iscsi.h | 148 +--
+ 8 files changed, 2862 insertions(+), 4057 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..c9a3abf 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -29,15 +29,14 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/inet.h>
+-#include <linux/file.h>
+ #include <linux/blkdev.h>
+ #include <linux/crypto.h>
+ #include <linux/delay.h>
+ #include <linux/kfifo.h>
+ #include <linux/scatterlist.h>
++#include <linux/mutex.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+-#include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_transport_iscsi.h>
+@@ -48,7 +47,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus at yahoo.com>, "
+ "Alex Aizman <itn780 at yahoo.com>");
+ MODULE_DESCRIPTION("iSCSI/TCP data-path");
+ MODULE_LICENSE("GPL");
+-#undef DEBUG_TCP
++/* #define DEBUG_TCP */
+ #define DEBUG_ASSERT
+
+ #ifdef DEBUG_TCP
+@@ -64,515 +63,200 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment);
+-
+-/*
+- * Scatterlist handling: inside the iscsi_segment, we
+- * remember an index into the scatterlist, and set data/size
+- * to the current scatterlist entry. For highmem pages, we
+- * kmap as needed.
+- *
+- * Note that the page is unmapped when we return from
+- * TCP's data_ready handler, so we may end up mapping and
+- * unmapping the same page repeatedly. The whole reason
+- * for this is that we shouldn't keep the page mapped
+- * outside the softirq.
+- */
+-
+-/**
+- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+- * @segment: the buffer object
+- * @sg: scatterlist
+- * @offset: byte offset into that sg entry
+- *
+- * This function sets up the segment so that subsequent
+- * data is copied to the indicated sg entry, at the given
+- * offset.
+- */
+ static inline void
+-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg, unsigned int offset)
++iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+ {
+- segment->sg = sg;
+- segment->sg_offset = offset;
+- segment->size = min(sg->length - offset,
+- segment->total_size - segment->total_copied);
+- segment->data = NULL;
++ ibuf->sg.page = virt_to_page(vbuf);
++ ibuf->sg.offset = offset_in_page(vbuf);
++ ibuf->sg.length = size;
++ ibuf->sent = 0;
++ ibuf->use_sendmsg = 1;
+ }
+
+-/**
+- * iscsi_tcp_segment_map - map the current S/G page
+- * @segment: iscsi_segment
+- * @recv: 1 if called from recv path
+- *
+- * We only need to possibly kmap data if scatter lists are being used,
+- * because the iscsi passthrough and internal IO paths will never use high
+- * mem pages.
+- */
+ static inline void
+-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
++iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+ {
+- struct scatterlist *sg;
+-
+- if (segment->data != NULL || !segment->sg)
+- return;
+-
+- sg = segment->sg;
+- BUG_ON(segment->sg_mapped);
+- BUG_ON(sg->length == 0);
+-
++ ibuf->sg.page = sg->page;
++ ibuf->sg.offset = sg->offset;
++ ibuf->sg.length = sg->length;
+ /*
+- * If the page count is greater than one it is ok to send
+- * to the network layer's zero copy send path. If not we
+- * have to go the slow sendmsg path. We always map for the
+- * recv path.
++ * Fastpath: sg element fits into single page
+ */
+- if (page_count(sg_page(sg)) >= 1 && !recv)
+- return;
+-
+- debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+- segment);
+- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+- segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
++ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
++ ibuf->use_sendmsg = 0;
++ else
++ ibuf->use_sendmsg = 1;
++ ibuf->sent = 0;
+ }
+
+-static inline void
+-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
++static inline int
++iscsi_buf_left(struct iscsi_buf *ibuf)
+ {
+- debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
++ int rc;
+
+- if (segment->sg_mapped) {
+- debug_tcp("iscsi_tcp_segment_unmap valid\n");
+- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+- segment->sg_mapped = NULL;
+- segment->data = NULL;
+- }
++ rc = ibuf->sg.length - ibuf->sent;
++ BUG_ON(rc < 0);
++ return rc;
+ }
+
+-/*
+- * Splice the digest buffer into the buffer
+- */
+ static inline void
+-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
++iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ u8* crc)
+ {
+- segment->data = digest;
+- segment->digest_len = ISCSI_DIGEST_SIZE;
+- segment->total_size += ISCSI_DIGEST_SIZE;
+- segment->size = ISCSI_DIGEST_SIZE;
+- segment->copied = 0;
+- segment->sg = NULL;
+- segment->hash = NULL;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++
++ crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
++ buf->sg.length = tcp_conn->hdr_size;
+ }
+
+-/**
+- * iscsi_tcp_segment_done - check whether the segment is complete
+- * @segment: iscsi segment to check
+- * @recv: set to one of this is called from the recv path
+- * @copied: number of bytes copied
+- *
+- * Check if we're done receiving this segment. If the receive
+- * buffer is full but we expect more data, move on to the
+- * next entry in the scatterlist.
+- *
+- * If the amount of data we received isn't a multiple of 4,
+- * we will transparently receive the pad bytes, too.
+- *
+- * This function must be re-entrant.
+- */
+ static inline int
+-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
++iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
+ {
+- static unsigned char padbuf[ISCSI_PAD_LEN];
+- struct scatterlist sg;
+- unsigned int pad;
++ struct sk_buff *skb = tcp_conn->in.skb;
++
++ tcp_conn->in.zero_copy_hdr = 0;
+
+- debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+- segment->size, recv ? "recv" : "xmit");
+- if (segment->hash && copied) {
++ if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
++ tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
+ /*
+- * If a segment is kmapd we must unmap it before sending
+- * to the crypto layer since that will try to kmap it again.
++ * Zero-copy PDU Header: using connection context
++ * to store header pointer.
+ */
+- iscsi_tcp_segment_unmap(segment);
+-
+- if (!segment->data) {
+- sg_init_table(&sg, 1);
+- sg_set_page(&sg, sg_page(segment->sg), copied,
+- segment->copied + segment->sg_offset +
+- segment->sg->offset);
+- } else
+- sg_init_one(&sg, segment->data + segment->copied,
+- copied);
+- crypto_hash_update(segment->hash, &sg, copied);
+- }
+-
+- segment->copied += copied;
+- if (segment->copied < segment->size) {
+- iscsi_tcp_segment_map(segment, recv);
+- return 0;
+- }
+-
+- segment->total_copied += segment->copied;
+- segment->copied = 0;
+- segment->size = 0;
+-
+- /* Unmap the current scatterlist page, if there is one. */
+- iscsi_tcp_segment_unmap(segment);
+-
+- /* Do we have more scatterlist entries? */
+- debug_tcp("total copied %u total size %u\n", segment->total_copied,
+- segment->total_size);
+- if (segment->total_copied < segment->total_size) {
+- /* Proceed to the next entry in the scatterlist. */
+- iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+- 0);
+- iscsi_tcp_segment_map(segment, recv);
+- BUG_ON(segment->size == 0);
+- return 0;
+- }
+-
+- /* Do we need to handle padding? */
+- pad = iscsi_padding(segment->total_copied);
+- if (pad != 0) {
+- debug_tcp("consume %d pad bytes\n", pad);
+- segment->total_size += pad;
+- segment->size = pad;
+- segment->data = padbuf;
+- return 0;
+- }
+-
+- /*
+- * Set us up for transferring the data digest. hdr digest
+- * is completely handled in hdr done function.
+- */
+- if (segment->hash) {
+- crypto_hash_final(segment->hash, segment->digest);
+- iscsi_tcp_segment_splice_digest(segment,
+- recv ? segment->recv_digest : segment->digest);
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-/**
+- * iscsi_tcp_xmit_segment - transmit segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to transmnit
+- *
+- * This function transmits as much of the buffer as
+- * the network layer will accept, and returns the number of
+- * bytes transmitted.
+- *
+- * If CRC hashing is enabled, the function will compute the
+- * hash as it goes. When the entire segment has been transmitted,
+- * it will retrieve the hash value and send it as well.
+- */
+-static int
+-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct socket *sk = tcp_conn->sock;
+- unsigned int copied = 0;
+- int r = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 0, r)) {
+- struct scatterlist *sg;
+- unsigned int offset, copy;
+- int flags = 0;
+-
+- r = 0;
+- offset = segment->copied;
+- copy = segment->size - offset;
+-
+- if (segment->total_copied + segment->size < segment->total_size)
+- flags |= MSG_MORE;
+-
+- /* Use sendpage if we can; else fall back to sendmsg */
+- if (!segment->data) {
+- sg = segment->sg;
+- offset += segment->sg_offset + sg->offset;
+- r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+- flags);
++ if (skb_shinfo(skb)->frag_list == NULL &&
++ !skb_shinfo(skb)->nr_frags) {
++ tcp_conn->in.hdr = (struct iscsi_hdr *)
++ ((char*)skb->data + tcp_conn->in.offset);
++ tcp_conn->in.zero_copy_hdr = 1;
+ } else {
+- struct msghdr msg = { .msg_flags = flags };
+- struct kvec iov = {
+- .iov_base = segment->data + offset,
+- .iov_len = copy
+- };
+-
+- r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
++ /* ignoring return code since we checked
++ * in.copy before */
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ &tcp_conn->hdr, tcp_conn->hdr_size);
++ tcp_conn->in.hdr = &tcp_conn->hdr;
+ }
++ tcp_conn->in.offset += tcp_conn->hdr_size;
++ tcp_conn->in.copy -= tcp_conn->hdr_size;
++ } else {
++ int hdr_remains;
++ int copylen;
+
+- if (r < 0) {
+- iscsi_tcp_segment_unmap(segment);
+- if (copied || r == -EAGAIN)
+- break;
+- return r;
+- }
+- copied += r;
+- }
+- return copied;
+-}
+-
+-/**
+- * iscsi_tcp_segment_recv - copy data to segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to copy to
+- * @ptr: data pointer
+- * @len: amount of data available
+- *
+- * This function copies up to @len bytes to the
+- * given buffer, and returns the number of bytes
+- * consumed, which can actually be less than @len.
+- *
+- * If hash digest is enabled, the function will update the
+- * hash while copying.
+- * Combining these two operations doesn't buy us a lot (yet),
+- * but in the future we could implement combined copy+crc,
+- * just way we do for network layer checksums.
+- */
+-static int
+-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment, const void *ptr,
+- unsigned int len)
+-{
+- unsigned int copy = 0, copied = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+- if (copied == len) {
+- debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+- len);
+- break;
+- }
+-
+- copy = min(len - copied, segment->size - segment->copied);
+- debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+- memcpy(segment->data + segment->copied, ptr + copied, copy);
+- copied += copy;
+- }
+- return copied;
+-}
+-
+-static inline void
+-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+- unsigned char digest[ISCSI_DIGEST_SIZE])
+-{
+- struct scatterlist sg;
+-
+- sg_init_one(&sg, hdr, hdrlen);
+- crypto_hash_digest(hash, &sg, hdrlen, digest);
+-}
+-
+-static inline int
+-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- if (!segment->digest_len)
+- return 1;
+-
+- if (memcmp(segment->recv_digest, segment->digest,
+- segment->digest_len)) {
+- debug_scsi("digest mismatch\n");
+- return 0;
+- }
++ /*
++ * PDU header scattered across SKB's,
++ * copying it... This'll happen quite rarely.
++ */
+
+- return 1;
+-}
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
++ tcp_conn->in.hdr_offset = 0;
+
+-/*
+- * Helper function to set up segment buffer
+- */
+-static inline void
+-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- memset(segment, 0, sizeof(*segment));
+- segment->total_size = size;
+- segment->done = done;
++ hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
++ BUG_ON(hdr_remains <= 0);
+
+- if (hash) {
+- segment->hash = hash;
+- crypto_hash_init(hash);
+- }
+-}
++ copylen = min(tcp_conn->in.copy, hdr_remains);
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
++ copylen);
+
+-static inline void
+-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+- size_t size, iscsi_segment_done_fn_t *done,
+- struct hash_desc *hash)
+-{
+- __iscsi_segment_init(segment, size, done, hash);
+- segment->data = data;
+- segment->size = size;
+-}
++ debug_tcp("PDU gather offset %d bytes %d in.offset %d "
++ "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
++ tcp_conn->in.offset, tcp_conn->in.copy);
+
+-static inline int
+-iscsi_segment_seek_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg_list, unsigned int sg_count,
+- unsigned int offset, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- struct scatterlist *sg;
+- unsigned int i;
+-
+- debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+- offset, size);
+- __iscsi_segment_init(segment, size, done, hash);
+- for_each_sg(sg_list, sg, sg_count, i) {
+- debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+- sg->offset);
+- if (offset < sg->length) {
+- iscsi_tcp_segment_init_sg(segment, sg, offset);
+- return 0;
++ tcp_conn->in.offset += copylen;
++ tcp_conn->in.copy -= copylen;
++ if (copylen < hdr_remains) {
++ tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
++ tcp_conn->in.hdr_offset += copylen;
++ return -EAGAIN;
+ }
+- offset -= sg->length;
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->discontiguous_hdr_cnt++;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+
+- return ISCSI_ERR_DATA_OFFSET;
+-}
+-
+-/**
+- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+- * @tcp_conn: iscsi connection to prep for
+- *
+- * This function always passes NULL for the hash argument, because when this
+- * function is called we do not yet know the final size of the header and want
+- * to delay the digest processing until we know that.
+- */
+-static void
+-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+- tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+- iscsi_tcp_hdr_recv_done, NULL);
+-}
+-
+-/*
+- * Handle incoming reply to any other type of command
+- */
+-static int
+-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- int rc = 0;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+- conn->data, tcp_conn->in.datalen);
+- if (rc)
+- return rc;
+-
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-static void
+-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct hash_desc *rx_hash = NULL;
+-
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- conn->data, tcp_conn->in.datalen,
+- iscsi_tcp_data_recv_done, rx_hash);
+-}
+-
+ /*
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
++ struct scsi_cmnd *sc;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
+- tcp_task->r2t = NULL;
+- }
++ sc = ctask->sc;
++ if (unlikely(!sc))
++ return;
++
++ tcp_ctask->xmstate = XMSTATE_IDLE;
++ tcp_ctask->r2t = NULL;
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
++ int rc;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
++ /*
++ * setup Data-In byte counter (gets decremented..)
++ */
++ ctask->data_count = tcp_conn->in.datalen;
++
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (ctask->datasn != datasn)
+ return ISCSI_ERR_DATASN;
+- }
+
+- tcp_task->exp_datasn++;
++ ctask->datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+- debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ return ISCSI_ERR_DATA_OFFSET;
+- }
+
+ if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ struct scsi_cmnd *sc = ctask->sc;
++
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+- ISCSI_FLAG_DATA_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
+- else
++ res_count <= sc->request_bufflen) {
++ sc->resid = res_count;
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ }
+
+ conn->datain_pdus_cnt++;
+@@ -582,7 +266,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,10 +276,11 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -603,8 +288,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -619,57 +304,94 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ conn->dataout_pdus_cnt++;
+
+ r2t->sent = 0;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (sc->use_sg) {
++ int i, sg_count = 0;
++ struct scatterlist *sg = sc->request_buffer;
++
++ r2t->sg = NULL;
++ for (i = 0; i < sc->use_sg; i++, sg += 1) {
++ /* FIXME: prefetch ? */
++ if (sg_count + sg->length > r2t->data_offset) {
++ int page_offset;
++
++ /* sg page found! */
++
++ /* offset within this page */
++ page_offset = r2t->data_offset - sg_count;
++
++ /* fill in this buffer */
++ iscsi_buf_init_sg(&r2t->sendbuf, sg);
++ r2t->sendbuf.sg.offset += page_offset;
++ r2t->sendbuf.sg.length -= page_offset;
++
++ /* xmit logic will continue with next one */
++ r2t->sg = sg + 1;
++ break;
++ }
++ sg_count += sg->length;
++ }
++ BUG_ON(r2t->sg == NULL);
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + r2t->data_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
+ }
+
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2t with datalen %d\n",
+- tcp_conn->in.datalen);
++ printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
++ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ return ISCSI_ERR_R2TSN;
+- }
+
+- /* fill-in new R2T associated with the task */
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+- iscsi_conn_printk(KERN_INFO, conn,
+- "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ /* FIXME: use R2TSN to detect missing R2T */
++
++ /* fill-in new R2T associated with the task */
++ spin_lock(&session->lock);
++ if (!ctask->sc || ctask->mtask ||
++ session->state != ISCSI_STATE_LOGGED_IN) {
++ printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
++ "recovery...\n", ctask->itt);
++ spin_unlock(&session->lock);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = be32_to_cpu(rhdr->data_length);
+ if (r2t->data_length == 0) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
++ spin_unlock(&session->lock);
+ return ISCSI_ERR_DATALEN;
+ }
+
+@@ -679,159 +401,122 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with data len %u at offset %u "
+- "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ if (r2t->data_offset + r2t->data_length > ctask->total_length) {
++ spin_unlock(&session->lock);
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
++ "offset %u and total length %d\n", r2t->data_length,
++ r2t->data_offset, ctask->total_length);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+- conn->r2t_pdus_cnt++;
++ tcp_ctask->exp_r2tsn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ list_move_tail(&ctask->running, &conn->xmitqueue);
+
+- iscsi_requeue_task(task);
+- return 0;
+-}
+-
+-/*
+- * Handle incoming reply to DataIn command
+- */
+-static int
+-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+- int rc;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- /* check for non-exceptional status */
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+- if (rc)
+- return rc;
+- }
++ scsi_queue_work(session->host, &conn->xmitwork);
++ conn->r2t_pdus_cnt++;
++ spin_unlock(&session->lock);
+
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-/**
+- * iscsi_tcp_hdr_dissect - process PDU header
+- * @conn: iSCSI connection
+- * @hdr: PDU header
+- *
+- * This function analyzes the header of the PDU received,
+- * and performs several sanity checks. If the PDU is accompanied
+- * by data, the receive buffer is set up to copy the incoming data
+- * to the correct location.
+- */
+ static int
+-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
++iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_hdr *hdr;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ uint32_t cdgst, rdgst = 0, itt;
++
++ hdr = tcp_conn->in.hdr;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: datalen %d > %d\n",
+- tcp_conn->in.datalen, conn->max_recv_dlength);
++ printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
++ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
++ tcp_conn->data_copied = 0;
+
+- /* Additional header segments. So far, we don't
+- * process additional headers.
+- */
++ /* read AHS */
+ ahslen = hdr->hlength << 2;
++ tcp_conn->in.offset += ahslen;
++ tcp_conn->in.copy -= ahslen;
++ if (tcp_conn->in.copy < 0) {
++ printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
++ "%d bytes\n", ahslen);
++ return ISCSI_ERR_AHSLEN;
++ }
++
++ /* calculate read padding */
++ tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
++ if (tcp_conn->in.padding) {
++ tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
++ debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
++ }
++
++ if (conn->hdrdgst_en) {
++ struct scatterlist sg;
++
++ sg_init_one(&sg, (u8 *)hdr,
++ sizeof(struct iscsi_hdr) + ahslen);
++ crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
++ (u8 *)&cdgst);
++ rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
++ ahslen);
++ if (cdgst != rdgst) {
++ printk(KERN_ERR "iscsi_tcp: hdrdgst error "
++ "recv 0x%x calc 0x%x\n", rdgst, cdgst);
++ return ISCSI_ERR_HDR_DGST;
++ }
++ }
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
++ rc = iscsi_verify_itt(conn, hdr, &itt);
++ if (rc == ISCSI_ERR_NO_SCSI_CMD) {
++ tcp_conn->in.datalen = 0; /* force drop */
++ return 0;
++ } else if (rc)
+ return rc;
+
+- debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+- opcode, ahslen, tcp_conn->in.datalen);
++ debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
++ opcode, tcp_conn->in.offset, tcp_conn->in.copy,
++ ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
+- if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+-
+- /*
+- * Setup copy of Data-In into the Scsi_Cmnd
+- * Scatterlist case:
+- * We set up the iscsi_segment to point to the next
+- * scatterlist entry to copy to. As we go along,
+- * we move on to the next scatterlist entry and
+- * update the digest per-entry.
+- */
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+- "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
++ tcp_conn->in.ctask = session->cmds[itt];
++ rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
++ if (rc)
+ return rc;
+- }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
+- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
++
++ spin_lock(&session->lock);
++ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
++ spin_unlock(&session->lock);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
++ else if (tcp_conn->in.ctask->sc->sc_data_direction ==
++ DMA_TO_DEVICE)
++ rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
+ else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -842,24 +527,18 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+- if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: received buffer of "
+- "len %u but conn buffer is only %u "
+- "(opcode %0x)\n",
+- tcp_conn->in.datalen,
+- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
++ if (ISCSI_DEF_MAX_RECV_SEG_LEN <
++ tcp_conn->in.datalen) {
++ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
++ "but conn buffer is only %u (opcode %0x)\n",
++ tcp_conn->in.datalen,
++ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+- /* If there's data coming in with the response,
+- * receive it to the connection's buffer.
+- */
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+@@ -871,161 +550,457 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ break;
+ }
+
+- if (rc == 0) {
+- /* Anything that comes with data should have
+- * been handled above. */
+- if (tcp_conn->in.datalen)
+- return ISCSI_ERR_PROTO;
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ return rc;
++
++copy_hdr:
++ /*
++ * if we did zero copy for the header but we will need multiple
++ * skbs to complete the command then we have to copy the header
++ * for later use
++ */
++ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
++ (tcp_conn->in.datalen + tcp_conn->in.padding +
++ (conn->datadgst_en ? 4 : 0))) {
++ debug_tcp("Copying header for later use. in.copy %d in.datalen"
++ " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
++ memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->in.zero_copy_hdr = 0;
+ }
++ return 0;
++}
+
+- return rc;
++/**
++ * iscsi_ctask_copy - copy skb bits to the destination cmd task
++ * @conn: iscsi tcp connection
++ * @ctask: scsi command task
++ * @buf: buffer to copy to
++ * @buf_size: size of buffer
++ * @offset: offset within the buffer
++ *
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection and
++ * per-cmd byte counters.
++ *
++ * Read counters (in bytes):
++ *
++ * conn->in.offset offset within in progress SKB
++ * conn->in.copy left to copy from in progress SKB
++ * including padding
++ * conn->in.copied copied already from in progress SKB
++ * conn->data_copied copied already from in progress buffer
++ * ctask->sent total bytes sent up to the MidLayer
++ * ctask->data_count left to copy from in progress Data-In
++ * buf_left left to copy from in progress buffer
++ **/
++static inline int
++iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
++ void *buf, int buf_size, int offset)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int buf_left = buf_size - (tcp_conn->data_copied + offset);
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ size = min(size, ctask->data_count);
++
++ debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->in.copied);
++
++ BUG_ON(size <= 0);
++ BUG_ON(tcp_ctask->sent + size > ctask->total_length);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)buf + (offset + tcp_conn->data_copied), size);
++ /* must fit into skb->len */
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++ tcp_ctask->sent += size;
++ ctask->data_count -= size;
++
++ BUG_ON(tcp_conn->in.copy < 0);
++ BUG_ON(ctask->data_count < 0);
++
++ if (buf_size != (tcp_conn->data_copied + offset)) {
++ if (!ctask->data_count) {
++ BUG_ON(buf_size - tcp_conn->data_copied < 0);
++ /* done with this PDU */
++ return buf_size - tcp_conn->data_copied;
++ }
++ return -EAGAIN;
++ }
++
++ /* done with this buffer or with both - PDU and buffer */
++ tcp_conn->data_copied = 0;
++ return 0;
+ }
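
The return convention of iscsi_ctask_copy() packs three outcomes into one value: 0 when the destination buffer is filled, -EAGAIN when the skb ran dry first, and a positive remainder when the Data-In PDU ended before the buffer did. A standalone sketch of just that accounting (userspace C; the struct and field names are invented for illustration, only the arithmetic mirrors the code above):

/* Illustrative only -- not part of the patch.  A userspace model of the
 * iscsi_ctask_copy() bookkeeping: how many bytes can be consumed from the
 * current skb given the destination buffer and the remaining Data-In count.
 * struct copy_state and its fields are made up for this example. */
#include <stdio.h>

struct copy_state {
	int skb_left;		/* models tcp_conn->in.copy     */
	int buf_copied;		/* models tcp_conn->data_copied */
	int pdu_left;		/* models ctask->data_count     */
};

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;
	return m < c ? m : c;
}

/* Returns 0 when the destination buffer is full, -1 (think -EAGAIN) when the
 * skb ran out of data, or the number of unused buffer bytes when the PDU
 * ended before the buffer was filled. */
static int ctask_copy_step(struct copy_state *st, int buf_size, int offset)
{
	int buf_left = buf_size - (st->buf_copied + offset);
	int size = min3(st->skb_left, buf_left, st->pdu_left);

	st->skb_left   -= size;
	st->buf_copied += size;
	st->pdu_left   -= size;

	if (buf_size != st->buf_copied + offset) {
		if (!st->pdu_left)
			return buf_size - st->buf_copied; /* PDU done   */
		return -1;				  /* need more  */
	}
	st->buf_copied = 0;				  /* buffer done */
	return 0;
}

int main(void)
{
	struct copy_state st = { .skb_left = 1000, .buf_copied = 0,
				 .pdu_left = 1500 };

	printf("first skb:  %d\n", ctask_copy_step(&st, 4096, 0)); /* -1   */
	st.skb_left = 4000;					   /* next skb */
	printf("second skb: %d\n", ctask_copy_step(&st, 4096, 0)); /* 2596 */
	return 0;
}
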
+
+ /**
+- * iscsi_tcp_hdr_recv_done - process PDU header
++ * iscsi_tcp_copy - copy skb bits to the destination buffer
++ * @conn: iscsi tcp connection
+ *
+- * This is the callback invoked when the PDU header has
+- * been received. If the header is followed by additional
+- * header segments, we go back for more data.
+- */
+-static int
+-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection
++ * byte counters.
++ **/
++static inline int
++iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
+ {
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int buf_left = buf_size - tcp_conn->data_copied;
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->data_copied);
++ BUG_ON(size <= 0);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)conn->data + tcp_conn->data_copied, size);
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++
++ if (buf_size != tcp_conn->data_copied)
++ return -EAGAIN;
++
++ return 0;
++}
++
++static inline void
++partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
++ int offset, int length)
++{
++ struct scatterlist temp;
+
+- /* Check if there are additional header segments
+- * *prior* to computing the digest, because we
+- * may need to go back to the caller for more.
++ memcpy(&temp, sg, sizeof(struct scatterlist));
++ temp.offset = offset;
++ temp.length = length;
++ crypto_hash_update(desc, &temp, length);
++}
++
++static void
++iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
++{
++ struct scatterlist tmp;
++
++ sg_init_one(&tmp, buf, len);
++ crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
++}
++
++static int iscsi_scsi_data_in(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
++ struct scatterlist *sg;
++ int i, offset, rc = 0;
++
++ BUG_ON((void*)ctask != sc->SCp.ptr);
++
++ /*
++ * copying Data-In into the Scsi_Cmnd
+ */
+- hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+- if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+- /* Bump the header length - the caller will
+- * just loop around and get the AHS for us, and
+- * call again. */
+- unsigned int ahslen = hdr->hlength << 2;
+-
+- /* Make sure we don't overflow */
+- if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+- return ISCSI_ERR_AHSLEN;
+-
+- segment->total_size += ahslen;
+- segment->size += ahslen;
+- return 0;
++ if (!sc->use_sg) {
++ i = ctask->data_count;
++ rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
++ sc->request_bufflen,
++ tcp_ctask->data_offset);
++ if (rc == -EAGAIN)
++ return rc;
++ if (conn->datadgst_en)
++ iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
++ i);
++ rc = 0;
++ goto done;
+ }
+
+- /* We're done processing the header. See if we're doing
+- * header digests; if so, set up the recv_digest buffer
+- * and go back for more. */
+- if (conn->hdrdgst_en) {
+- if (segment->digest_len == 0) {
+- iscsi_tcp_segment_splice_digest(segment,
+- segment->recv_digest);
+- return 0;
++ offset = tcp_ctask->data_offset;
++ sg = sc->request_buffer;
++
++ if (tcp_ctask->data_offset)
++ for (i = 0; i < tcp_ctask->sg_count; i++)
++ offset -= sg[i].length;
++	/* we've passed through partial sg */
++ if (offset < 0)
++ offset = 0;
++
++ for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
++ char *dest;
++
++ dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
++ rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
++ sg[i].length, offset);
++ kunmap_atomic(dest, KM_SOFTIRQ0);
++ if (rc == -EAGAIN)
++ /* continue with the next SKB/PDU */
++ return rc;
++ if (!rc) {
++ if (conn->datadgst_en) {
++ if (!offset)
++ crypto_hash_update(
++ &tcp_conn->rx_hash,
++ &sg[i], sg[i].length);
++ else
++ partial_sg_digest_update(
++ &tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset + offset,
++ sg[i].length - offset);
++ }
++ offset = 0;
++ tcp_ctask->sg_count++;
+ }
+- iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+- segment->total_copied - ISCSI_DIGEST_SIZE,
+- segment->digest);
+
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_HDR_DGST;
++ if (!ctask->data_count) {
++ if (rc && conn->datadgst_en)
++ /*
++ * data-in is complete, but buffer not...
++ */
++ partial_sg_digest_update(&tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset,
++ sg[i].length-rc);
++ rc = 0;
++ break;
++ }
++
++ if (!tcp_conn->in.copy)
++ return -EAGAIN;
+ }
++ BUG_ON(ctask->data_count);
+
+- tcp_conn->in.hdr = hdr;
+- return iscsi_tcp_hdr_dissect(conn, hdr);
++done:
++ /* check for non-exceptional status */
++ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
++ (long)sc, sc->result, ctask->itt,
++ tcp_conn->in.hdr->flags);
++ spin_lock(&conn->session->lock);
++ __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
++ spin_unlock(&conn->session->lock);
++ }
++
++ return rc;
++}
++
++static int
++iscsi_data_recv(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc = 0, opcode;
++
++ opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
++ switch (opcode) {
++ case ISCSI_OP_SCSI_DATA_IN:
++ rc = iscsi_scsi_data_in(conn);
++ break;
++ case ISCSI_OP_SCSI_CMD_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_ASYNC_EVENT:
++ case ISCSI_OP_REJECT:
++ /*
++ * Collect data segment to the connection's data
++ * placeholder
++ */
++ if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
++ rc = -EAGAIN;
++ goto exit;
++ }
++
++ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
++ tcp_conn->in.datalen);
++ if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
++ iscsi_recv_digest_update(tcp_conn, conn->data,
++ tcp_conn->in.datalen);
++ break;
++ default:
++ BUG_ON(1);
++ }
++exit:
++ return rc;
+ }
+
+ /**
+- * iscsi_tcp_recv - TCP receive in sendfile fashion
++ * iscsi_tcp_data_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ **/
+ static int
+-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+- unsigned int offset, size_t len)
++iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
++ unsigned int offset, size_t len)
+ {
++ int rc;
+ struct iscsi_conn *conn = rd_desc->arg.data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->in.segment;
+- struct skb_seq_state seq;
+- unsigned int consumed = 0;
+- int rc = 0;
++ int processed;
++ char pad[ISCSI_PAD_LEN];
++ struct scatterlist sg;
+
+- debug_tcp("in %d bytes\n", skb->len - offset);
++ /*
++ * Save current SKB and its offset in the corresponding
++ * connection context.
++ */
++ tcp_conn->in.copy = skb->len - offset;
++ tcp_conn->in.offset = offset;
++ tcp_conn->in.skb = skb;
++ tcp_conn->in.len = tcp_conn->in.copy;
++ BUG_ON(tcp_conn->in.copy <= 0);
++ debug_tcp("in %d bytes\n", tcp_conn->in.copy);
++
++more:
++ tcp_conn->in.copied = 0;
++ rc = 0;
+
+ if (unlikely(conn->suspend_rx)) {
+ debug_tcp("conn %d Rx suspended!\n", conn->id);
+ return 0;
+ }
+
+- skb_prepare_seq_read(skb, offset, skb->len, &seq);
+- while (1) {
+- unsigned int avail;
+- const u8 *ptr;
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
++ tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
++ rc = iscsi_hdr_extract(tcp_conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto nomore;
++ else {
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ }
+
+- avail = skb_seq_read(consumed, &ptr, &seq);
+- if (avail == 0) {
+- debug_tcp("no more data avail. Consumed %d\n",
+- consumed);
+- break;
++ /*
++ * Verify and process incoming PDU header.
++ */
++ rc = iscsi_tcp_hdr_recv(conn);
++ if (!rc && tcp_conn->in.datalen) {
++ if (conn->datadgst_en)
++ crypto_hash_init(&tcp_conn->rx_hash);
++ tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
++ } else if (rc) {
++ iscsi_conn_failure(conn, rc);
++ return 0;
+ }
+- BUG_ON(segment->copied >= segment->size);
+-
+- debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+- rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+- BUG_ON(rc == 0);
+- consumed += rc;
+-
+- if (segment->total_copied >= segment->total_size) {
+- debug_tcp("segment done\n");
+- rc = segment->done(tcp_conn, segment);
+- if (rc != 0) {
+- skb_abort_seq_read(&seq);
+- goto error;
+- }
++ }
++
++ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
++ uint32_t recv_digest;
+
+- /* The done() functions sets up the
+- * next segment. */
++ debug_tcp("extra data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++ rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++
++ memcpy(&recv_digest, conn->data, sizeof(uint32_t));
++ if (recv_digest != tcp_conn->in.datadgst) {
++ debug_tcp("iscsi_tcp: data digest error!"
++ "0x%x != 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
++ return 0;
++ } else {
++ debug_tcp("iscsi_tcp: data digest match!"
++ "0x%x == 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+ }
+- skb_abort_seq_read(&seq);
+- conn->rxdata_octets += consumed;
+- return consumed;
+
+-error:
+- debug_tcp("Error receiving PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return 0;
++ if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
++ tcp_conn->in.copy) {
++
++ debug_tcp("data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++
++ rc = iscsi_data_recv(conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ tcp_conn->in.copy -= tcp_conn->in.padding;
++ tcp_conn->in.offset += tcp_conn->in.padding;
++ if (conn->datadgst_en) {
++ if (tcp_conn->in.padding) {
++ debug_tcp("padding -> %d\n",
++ tcp_conn->in.padding);
++ memset(pad, 0, tcp_conn->in.padding);
++ sg_init_one(&sg, pad, tcp_conn->in.padding);
++ crypto_hash_update(&tcp_conn->rx_hash,
++ &sg, sg.length);
++ }
++ crypto_hash_final(&tcp_conn->rx_hash,
++ (u8 *) &tcp_conn->in.datadgst);
++ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
++ tcp_conn->data_copied = 0;
++ } else
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ }
++
++ debug_tcp("f, processed %d from out of %d padding %d\n",
++ tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
++ BUG_ON(tcp_conn->in.offset - offset > len);
++
++ if (tcp_conn->in.offset - offset != len) {
++ debug_tcp("continue to process %d bytes\n",
++ (int)len - (tcp_conn->in.offset - offset));
++ goto more;
++ }
++
++nomore:
++ processed = tcp_conn->in.offset - offset;
++ BUG_ON(processed == 0);
++ return processed;
++
++again:
++ processed = tcp_conn->in.offset - offset;
++ debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
++ processed, (int)len, (int)rd_desc->count);
++ BUG_ON(processed == 0);
++ BUG_ON(processed > len);
++
++ conn->rxdata_octets += processed;
++ return processed;
+ }
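
Seen end to end, iscsi_tcp_data_recv() is a per-skb state machine: wait for (and possibly gather) a header, consume the data segment, then, with digests enabled, a trailing 4-byte CRC32C, and back to the header. A minimal userspace sketch of that progression (the state names mirror the IN_PROGRESS_* constants; the header-gather state and all error paths are omitted, and the chunk sizes are invented):

/* Illustrative only -- not part of the patch.  Shows the receive state
 * progression for one Data-In PDU with the data digest enabled. */
#include <stdio.h>

enum rx_state {
	WAIT_HEADER,	/* need a (possibly split) PDU header          */
	DATA_RECV,	/* header parsed, data segment still arriving  */
	DDIGEST_RECV,	/* data done, 4-byte CRC32C digest pending     */
};

int main(void)
{
	enum rx_state state = WAIT_HEADER;
	int datalen = 8192;	/* hypothetical Data-In segment length */
	int skb_bytes[] = { 48, 4096, 4096, 4 };  /* hdr, data, data, digest */
	unsigned int i;

	for (i = 0; i < sizeof(skb_bytes) / sizeof(skb_bytes[0]); i++) {
		switch (state) {
		case WAIT_HEADER:
			printf("header consumed (%d bytes), expect %d data\n",
			       skb_bytes[i], datalen);
			state = DATA_RECV;
			break;
		case DATA_RECV:
			datalen -= skb_bytes[i];
			printf("data chunk, %d left\n", datalen);
			if (!datalen)
				state = DDIGEST_RECV;
			break;
		case DDIGEST_RECV:
			printf("digest received, back to header\n");
			state = WAIT_HEADER;
			break;
		}
	}
	return 0;
}
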
+
+ static void
+ iscsi_tcp_data_ready(struct sock *sk, int flag)
+ {
+ struct iscsi_conn *conn = sk->sk_user_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+
+ /*
+- * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
++ * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+ * We set count to 1 because we want the network layer to
+- * hand us all the skbs that are available. iscsi_tcp_recv
++ * hand us all the skbs that are available. iscsi_tcp_data_recv
+ * handled pdus that cross buffers or pdus that still need data.
+ */
+ rd_desc.arg.data = conn;
+ rd_desc.count = 1;
+- tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
++ tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+
+ read_unlock(&sk->sk_callback_lock);
+-
+- /* If we had to (atomically) map a highmem page,
+- * unmap it now. */
+- iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ }
+
+ static void
+@@ -1105,179 +1080,127 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+ }
+
+ /**
+- * iscsi_xmit - TCP transmit
+- **/
+-static int
+-iscsi_xmit(struct iscsi_conn *conn)
++ * iscsi_send - generic send routine
++ * @sk: kernel's socket
++ * @buf: buffer to write from
++ * @size: actual size to write
++ * @flags: socket's flags
++ */
++static inline int
++iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+- unsigned int consumed = 0;
+- int rc = 0;
+-
+- while (1) {
+- rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- if (rc == 0)
+- break;
+-
+- consumed += rc;
++ struct socket *sk = tcp_conn->sock;
++ int offset = buf->sg.offset + buf->sent, res;
+
+- if (segment->total_copied >= segment->total_size) {
+- if (segment->done != NULL) {
+- rc = segment->done(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- }
+- }
++ /*
++ * if we got use_sg=0 or are sending something we kmallocd
++ * then we did not have to do kmap (kmap returns page_address)
++ *
++ * if we got use_sg > 0, but had to drop down, we do not
++ * set clustering so this should only happen for that
++ * slab case.
++ */
++ if (buf->use_sendmsg)
++ res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
++ else
++ res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
++
++ if (res >= 0) {
++ conn->txdata_octets += res;
++ buf->sent += res;
++ return res;
+ }
+
+- debug_tcp("xmit %d bytes\n", consumed);
+-
+- conn->txdata_octets += consumed;
+- return consumed;
+-
+-error:
+- /* Transmit error. We could initiate error recovery
+- * here. */
+- debug_tcp("Error sending PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return rc;
++ tcp_conn->sendpage_failures_cnt++;
++ if (res == -EAGAIN)
++ res = -ENOBUFS;
++ else
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return res;
+ }
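
The short-write handling above is subtle: iscsi_send() advances buf->sent by however many bytes the socket took, and the callers turn a short result into -EAGAIN so the same buffer is retried from buf->sent on a later xmit pass. A standalone sketch of that retry contract (userspace C; the buffer struct and send stub are invented for illustration):

/* Illustrative only -- not part of the patch.  A userspace model of the
 * partial-send contract used by iscsi_send()/iscsi_sendhdr(): keep a 'sent'
 * cursor in the buffer and report failure on a short write so the caller
 * retries later from where it stopped. */
#include <stdio.h>

struct xmit_buf {
	const char *data;
	int length;
	int sent;		/* models iscsi_buf->sent */
};

/* Pretend socket that accepts at most 'room' bytes per call. */
static int fake_sendpage(const char *p, int len, int room)
{
	(void)p;
	return len < room ? len : room;
}

/* Mirrors the iscsi_sendhdr() pattern: 0 = fully sent, -1 = call again. */
static int send_buf(struct xmit_buf *buf, int room)
{
	int size = buf->length - buf->sent;
	int res = fake_sendpage(buf->data + buf->sent, size, room);

	buf->sent += res;
	return res == size ? 0 : -1;
}

int main(void)
{
	struct xmit_buf buf = { .data = "0123456789abcdef0123456789abcdef",
				.length = 32, .sent = 0 };
	int passes = 0;

	while (send_buf(&buf, 10))	/* socket takes 10 bytes per pass */
		passes++;
	printf("sent %d bytes in %d retries plus one final pass\n",
	       buf.sent, passes);
	return 0;
}
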
+
+ /**
+- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+- */
+-static inline int
+-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+-
+- return segment->total_copied - segment->total_size;
+-}
+-
++ * iscsi_sendhdr - send PDU Header via tcp_sendpage()
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @datalen: length of data to be sent after the header
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
+ static inline int
+-iscsi_tcp_flush(struct iscsi_conn *conn)
++iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+ {
+- int rc;
+-
+- while (iscsi_tcp_xmit_qlen(conn)) {
+- rc = iscsi_xmit(conn);
+- if (rc == 0)
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (buf->sent + size != buf->sg.length || datalen)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
++ if (res >= 0) {
++ if (size != res)
+ return -EAGAIN;
+- if (rc < 0)
+- return rc;
++ return 0;
+ }
+
+- return 0;
+-}
+-
+-/*
+- * This is called when we're done sending the header.
+- * Simply copy the data_segment to the send segment, and return.
+- */
+-static int
+-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- tcp_conn->out.segment = tcp_conn->out.data_segment;
+- debug_tcp("Header done. Next segment size %u total_size %u\n",
+- tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+- return 0;
++ return res;
+ }
+
+-static void
+-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
++/**
++ * iscsi_sendpage - send one page of iSCSI Data-Out.
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @count: remaining data
++ * @sent: number of bytes sent
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
++static inline int
++iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ int *count, int *sent)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
+- conn->hdrdgst_en? ", digest enabled" : "");
+-
+- /* Clear the data segment - needs to be filled in by the
+- * caller using iscsi_tcp_send_data_prep() */
+- memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+-
+- /* If header digest is enabled, compute the CRC and
+- * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
+- * sufficient room.
+- */
+- if (conn->hdrdgst_en) {
+- iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+- hdr + hdrlen);
+- hdrlen += ISCSI_DIGEST_SIZE;
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (size > *count)
++ size = *count;
++ if (buf->sent + size != buf->sg.length || *count != size)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
++ size, buf->sent, *count, *sent, res);
++ if (res >= 0) {
++ *count -= res;
++ *sent += res;
++ if (size != res)
++ return -EAGAIN;
++ return 0;
+ }
+
+- /* Remember header pointer for later, when we need
+- * to decide whether there's a payload to go along
+- * with the header. */
+- tcp_conn->out.hdr = hdr;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+- iscsi_tcp_send_hdr_done, NULL);
++ return res;
+ }
+
+-/*
+- * Prepare the send buffer for the payload data.
+- * Padding and checksumming will all be taken care
+- * of by the iscsi_segment routines.
+- */
+-static int
+-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+- unsigned int count, unsigned int offset,
+- unsigned int len)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
+- tcp_conn, offset, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+- sg, count, offset, len,
+- NULL, tx_hash);
+-}
+-
+-static void
+-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+- size_t len)
++static inline void
++iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+- data, len, NULL, tx_hash);
++ crypto_hash_init(&tcp_conn->tx_hash);
++ tcp_ctask->digest_count = 4;
+ }
+
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1287,17 +1210,13 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ *
+ * Called under connection lock.
+ **/
+-static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+- struct iscsi_r2t_info *r2t)
++static void
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_r2t_info *r2t, int left)
+ {
+ struct iscsi_data *hdr;
+- int new_offset, left;
+-
+- BUG_ON(r2t->data_length - r2t->sent < 0);
+- left = r2t->data_length - r2t->sent;
+- if (left == 0)
+- return 0;
++ struct scsi_cmnd *sc = ctask->sc;
++ int new_offset;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -1305,8 +1224,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1318,177 +1237,514 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ r2t->data_count = left;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+-
+ conn->dataout_pdus_cnt++;
+- return 1;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (iscsi_buf_left(&r2t->sendbuf))
++ return;
++
++ if (sc->use_sg) {
++ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
++ r2t->sg += 1;
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + new_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
++}
++
++static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
++ unsigned long len)
++{
++ tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
++ if (!tcp_ctask->pad_count)
++ return;
++
++ tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
++ debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
++ tcp_ctask->xmstate |= XMSTATE_W_PAD;
+ }
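
iscsi_set_padding() relies on ISCSI_PAD_LEN being a power of two: len & (ISCSI_PAD_LEN - 1) is the remainder mod 4, and the pad count is its complement to the next 4-byte boundary. A tiny standalone check of that arithmetic (userspace C, illustrative only):

/* Illustrative only -- not part of the patch.  iSCSI data segments are
 * padded to a 4-byte boundary; this mirrors iscsi_set_padding() above. */
#include <stdio.h>

#define ISCSI_PAD_LEN 4

static unsigned int pad_bytes(unsigned long len)
{
	unsigned int pad = len & (ISCSI_PAD_LEN - 1);	/* len mod 4 */

	return pad ? ISCSI_PAD_LEN - pad : 0;		/* 0..3 pad bytes */
}

int main(void)
{
	/* expected: 5 -> 3, 8 -> 0, 13 -> 3, 4096 -> 0 */
	unsigned long lens[] = { 5, 8, 13, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %lu -> %u pad bytes\n", lens[i], pad_bytes(lens[i]));
	return 0;
}
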
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
++static void
++iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
++{
++ struct scsi_cmnd *sc = ctask->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++
++ tcp_ctask->sent = 0;
++ tcp_ctask->sg_count = 0;
++
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ tcp_ctask->xmstate = XMSTATE_W_HDR;
++ tcp_ctask->exp_r2tsn = 0;
++ BUG_ON(ctask->total_length == 0);
++
++ if (sc->use_sg) {
++ struct scatterlist *sg = sc->request_buffer;
++
++ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
++ tcp_ctask->sg = sg + 1;
++ tcp_ctask->bad_sg = sg + sc->use_sg;
++ } else {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf,
++ sc->request_buffer,
++ sc->request_bufflen);
++ tcp_ctask->sg = NULL;
++ tcp_ctask->bad_sg = NULL;
++ }
++ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
++ "unsol count %d, unsol offset %d]\n",
++ ctask->itt, ctask->total_length, ctask->imm_count,
++ ctask->unsol_count, ctask->unsol_offset);
++ } else
++ tcp_ctask->xmstate = XMSTATE_R_HDR;
++
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
++ sizeof(struct iscsi_hdr));
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ *
++ * Management xmit state machine consists of two states:
++ * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
++ * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
++ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
+- int err;
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++ int rc;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
++ debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
++ conn->id, tcp_mtask->xmstate, mtask->itt);
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
++ conn->stop_stage != STOP_CONN_RECOVER &&
++ conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
++ (u8*)tcp_mtask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
++ mtask->data_count);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ return rc;
++ }
++ }
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
++ BUG_ON(!mtask->data_count);
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ /* FIXME: implement.
++		 * Virtual buffer could be spread across multiple pages...
+ */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
++ do {
++ int rc;
++
++ rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
++ &mtask->data_count, &tcp_mtask->sent);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ return rc;
++ }
++ } while (mtask->data_count);
++ }
+
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
++ BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
+
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
++ spin_lock_bh(&session->lock);
++ list_del(&conn->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
++ sizeof(void*));
++ spin_unlock_bh(&session->lock);
++ }
++ return 0;
++}
++
++static inline int
++iscsi_send_read_hdr(struct iscsi_conn *conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
++{
++ int rc;
++
++ tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
++ if (!rc) {
++ BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
++ return 0; /* wait for Data-In */
+ }
++ tcp_ctask->xmstate |= XMSTATE_R_HDR;
++ return rc;
++}
+
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++static inline int
++iscsi_send_write_hdr(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
++ if (rc) {
++ tcp_ctask->xmstate |= XMSTATE_W_HDR;
++ return rc;
++ }
+
+- if (!task->imm_count)
+- return 0;
++ if (ctask->imm_count) {
++ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
++ iscsi_set_padding(tcp_ctask, ctask->imm_count);
+
+- /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
+- if (err)
+- return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ if (ctask->conn->datadgst_en) {
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ tcp_ctask->immdigest = 0;
++ }
++ }
++
++ if (ctask->unsol_count)
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ return 0;
+ }
+
+-/*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
+- *
+- * We're expected to return 0 when everything was transmitted succesfully,
+- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+- * of error.
+- */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
+- int rc = 0;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int sent = 0, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
++ tcp_ctask->pad_count);
++ if (conn->datadgst_en)
++ crypto_hash_update(&tcp_conn->tx_hash,
++ &tcp_ctask->sendbuf.sg,
++ tcp_ctask->sendbuf.sg.length);
++ } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
++ return 0;
+
+-flush:
+- /* Flush any pending data first. */
+- rc = iscsi_tcp_flush(conn);
+- if (rc < 0)
+- return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
++ debug_scsi("sending %d pad bytes for itt 0x%x\n",
++ tcp_ctask->pad_count, ctask->itt);
++ rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
++ &sent);
++ if (rc) {
++ debug_scsi("padding send failed %d\n", rc);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
++ }
++ return rc;
++}
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++static int
++iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_buf *buf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask;
++ struct iscsi_tcp_conn *tcp_conn;
++ int rc, sent = 0;
++
++ if (!conn->datadgst_en)
+ return 0;
++
++ tcp_ctask = ctask->dd_data;
++ tcp_conn = conn->dd_data;
++
++ if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
++ crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
++ iscsi_buf_init_iov(buf, (char*)digest, 4);
+ }
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
++
++ rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
++ if (!rc)
++ debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
++ ctask->itt);
++ else {
++ debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
++ *digest, ctask->itt);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
++ }
++ return rc;
++}
+
+- /* Are we done already? */
+- if (sc->sc_data_direction != DMA_TO_DEVICE)
+- return 0;
++static int
++iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
++ struct scatterlist **sg, int *sent, int *count,
++ struct iscsi_buf *digestbuf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc, buf_sent, offset;
++
++ while (*count) {
++ buf_sent = 0;
++ offset = sendbuf->sent;
++
++ rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
++ *sent = *sent + buf_sent;
++ if (buf_sent && conn->datadgst_en)
++ partial_sg_digest_update(&tcp_conn->tx_hash,
++ &sendbuf->sg, sendbuf->sg.offset + offset,
++ buf_sent);
++ if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
++ iscsi_buf_init_sg(sendbuf, *sg);
++ *sg = *sg + 1;
++ }
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (rc)
++ return rc;
++ }
+
+- /* Prepare a header for the unsolicited PDU.
+- * The amount of data we want to send will be
+- * in task->data_count.
+- * FIXME: return the data count instead.
+- */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ rc = iscsi_send_padding(conn, ctask);
++ if (rc)
++ return rc;
++
++ return iscsi_send_digest(conn, ctask, digestbuf, digest);
++}
++
++static int
++iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_data_task *dtask;
++ int rc;
++
++ tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
++ if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
++ dtask = &tcp_ctask->unsol_dtask;
++
++ iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
++ sizeof(struct iscsi_hdr));
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)dtask->hdrext);
++
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
++ iscsi_set_padding(tcp_ctask, ctask->data_count);
++ }
++
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
++ return rc;
++ }
++
++ if (conn->datadgst_en) {
++ dtask = &tcp_ctask->unsol_dtask;
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
++ }
++
++ debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
++ ctask->itt, ctask->unsol_count, tcp_ctask->sent);
++ return 0;
++}
+
+- debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++static int
++iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
++ BUG_ON(!ctask->unsol_count);
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
++send_hdr:
++ rc = iscsi_send_unsol_hdr(conn, ctask);
+ if (rc)
+- goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
+- goto flush;
+- } else {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_r2t_info *r2t;
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
++ struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
++ int start = tcp_ctask->sent;
+
+- /* All unsolicited PDUs sent. Check for solicited PDUs.
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->data_count,
++ &dtask->digestbuf, &dtask->digest);
++ ctask->unsol_count -= tcp_ctask->sent - start;
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ /*
++ * Done with the Data-Out. Next, check if we need
++ * to send another unsolicited Data-Out.
+ */
+- spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
+- debug_scsi(" done with r2t %p\n", r2t);
+-
+- __kfifo_put(tcp_task->r2tpool.queue,
+- (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
+- }
++ if (ctask->unsol_count) {
++ debug_scsi("sending more uns\n");
++ tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
++ goto send_hdr;
+ }
++ }
++ return 0;
++}
+
+- if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_r2t_info *r2t;
++ struct iscsi_data_task *dtask;
++ int left, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ if (!tcp_ctask->r2t) {
++ spin_lock_bh(&session->lock);
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ spin_unlock_bh(&session->lock);
++ }
++send_hdr:
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
++
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &r2t->headbuf,
++ (u8*)dtask->hdrext);
++ rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ return rc;
+ }
+- spin_unlock_bh(&session->lock);
+
+- /* Waiting for more R2Ts to arrive. */
+- if (r2t == NULL) {
+- debug_tcp("no R2Ts yet\n");
+- return 0;
++ if (conn->datadgst_en) {
++ iscsi_data_digest_init(conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
+ }
+
+- debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
+- r2t->data_offset + r2t->sent, r2t->data_count);
++ iscsi_set_padding(tcp_ctask, r2t->data_count);
++ debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
++ r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
++ r2t->sent);
++ }
+
+- iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+- sizeof(struct iscsi_hdr));
++ if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
+- r2t->data_offset + r2t->sent,
+- r2t->data_count);
++ rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
++ &r2t->sent, &r2t->data_count,
++ &dtask->digestbuf, &dtask->digest);
+ if (rc)
+- goto fail;
+- tcp_task->sent += r2t->data_count;
+- r2t->sent += r2t->data_count;
+- goto flush;
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++
++ /*
++ * Done with this Data-Out. Next, check if we have
++ * to send another Data-Out for this R2T.
++ */
++ BUG_ON(r2t->data_length - r2t->sent < 0);
++ left = r2t->data_length - r2t->sent;
++ if (left) {
++ iscsi_solicit_data_cont(conn, ctask, r2t, left);
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ goto send_hdr;
++ }
++
++ /*
++ * Done with this R2T. Check if there are more
++ * outstanding R2Ts ready to be processed.
++ */
++ spin_lock_bh(&session->lock);
++ tcp_ctask->r2t = NULL;
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
++ sizeof(void*));
++ if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
++ sizeof(void*))) {
++ tcp_ctask->r2t = r2t;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ spin_unlock_bh(&session->lock);
++ goto send_hdr;
++ }
++ spin_unlock_bh(&session->lock);
+ }
+ return 0;
+-fail:
+- iscsi_conn_failure(conn, rc);
+- return -EIO;
++}
++
++static int
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc = 0;
++
++ debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
++ conn->id, tcp_ctask->xmstate, ctask->itt);
++
++ /*
++ * serialize with TMF AbortTask
++ */
++ if (ctask->mtask)
++ return rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_R_HDR)
++ return iscsi_send_read_hdr(conn, tcp_ctask);
++
++ if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
++ rc = iscsi_send_write_hdr(conn, ctask);
++ if (rc)
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->imm_count,
++ &tcp_ctask->immbuf, &tcp_ctask->immdigest);
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
++ }
++
++ rc = iscsi_send_unsol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ rc = iscsi_send_sol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ return rc;
+ }
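
iscsi_tcp_ctask_xmit() walks the stages encoded in tcp_ctask->xmstate; the convention throughout the send helpers above is to clear a stage bit before attempting it and to set it back when the send would block, so the next xmit call resumes exactly where it stalled. A standalone sketch of that bit-flag retry pattern (userspace C; the flag names echo XMSTATE_* but the byte costs and send stub are invented):

/* Illustrative only -- not part of the patch.  Models the xmstate retry
 * protocol: clear the bit, try the stage, restore the bit on failure. */
#include <stdio.h>

#define XMSTATE_W_HDR    (1 << 0)
#define XMSTATE_IMM_DATA (1 << 1)

static int fake_send(int *budget, int cost)
{
	if (*budget < cost)
		return -1;		/* think -EAGAIN: socket buffer full */
	*budget -= cost;
	return 0;
}

static int xmit_once(unsigned int *xmstate, int *budget)
{
	if (*xmstate & XMSTATE_W_HDR) {
		*xmstate &= ~XMSTATE_W_HDR;
		if (fake_send(budget, 48)) {
			*xmstate |= XMSTATE_W_HDR;	/* retry header later */
			return -1;
		}
	}
	if (*xmstate & XMSTATE_IMM_DATA) {
		*xmstate &= ~XMSTATE_IMM_DATA;
		if (fake_send(budget, 4096)) {
			*xmstate |= XMSTATE_IMM_DATA;	/* retry data later */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned int xmstate = XMSTATE_W_HDR | XMSTATE_IMM_DATA;
	int budget = 1000;	/* bytes the socket will take this pass */

	while (xmit_once(&xmstate, &budget))
		budget += 4096;	/* pretend space freed up; try again */
	printf("all stages sent, xmstate=0x%x\n", xmstate);
	return 0;
}
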
+
+ static struct iscsi_cls_conn *
+@@ -1498,7 +1754,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,31 +1764,45 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ /* initial operational parameters */
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ if (IS_ERR(tcp_conn->tx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->tx_hash.tfm));
++ goto free_tcp_conn;
++ }
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->rx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->rx_hash.tfm))
++ if (IS_ERR(tcp_conn->rx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->rx_hash.tfm));
+ goto free_tx_tfm;
++ }
+
+ return cls_conn;
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Could not create connection due to crc32c "
+- "loading error. Make sure the crc32c "
+- "module is built as a module or into the "
+- "kernel\n");
++free_tcp_conn:
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1540,21 +1810,18 @@ free_conn:
+ static void
+ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct socket *sock = tcp_conn->sock;
+
+- if (!sock)
++ if (!tcp_conn->sock)
+ return;
+
+- sock_hold(sock->sk);
++ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+- sock_put(sock->sk);
++ sock_put(tcp_conn->sock->sk);
+
+- spin_lock_bh(&session->lock);
++ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+- spin_unlock_bh(&session->lock);
+- sockfd_put(sock);
++ conn->recv_lock = NULL;
+ }
+
+ static void
+@@ -1564,13 +1831,14 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+@@ -1579,60 +1847,9 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+-
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+-}
+-
+-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+- char *buf, int *port,
+- int (*getname)(struct socket *, struct sockaddr *,
+- int *addrlen))
+-{
+- struct sockaddr_storage *addr;
+- struct sockaddr_in6 *sin6;
+- struct sockaddr_in *sin;
+- int rc = 0, len;
+-
+- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+- if (!addr)
+- return -ENOMEM;
+-
+- if (getname(sock, (struct sockaddr *) addr, &len)) {
+- rc = -ENODEV;
+- goto free_addr;
+- }
+-
+- switch (addr->ss_family) {
+- case AF_INET:
+- sin = (struct sockaddr_in *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+- *port = be16_to_cpu(sin->sin_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- case AF_INET6:
+- sin6 = (struct sockaddr_in6 *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+- *port = be16_to_cpu(sin6->sin6_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- }
+-free_addr:
+- kfree(addr);
+- return rc;
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+ }
+
+ static int
+@@ -1640,8 +1857,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1651,28 +1866,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ /* lookup for existing socket */
+ sock = sockfd_lookup((int)transport_eph, &err);
+ if (!sock) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "sockfd_lookup failed %d\n", err);
++ printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
+ return -EEXIST;
+ }
+- /*
+- * copy these values now because if we drop the session
+- * userspace may still want to query the values since we will
+- * be using them for the reconnect
+- */
+- err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+- &conn->portal_port, kernel_getpeername);
+- if (err)
+- goto free_socket;
+-
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
+- if (err)
+- goto free_socket;
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+- goto free_socket;
++ return err;
+
+ /* bind iSCSI connection and socket */
+ tcp_conn->sock = sock;
+@@ -1683,17 +1883,38 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+ * set receive state machine into initial state
+ */
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++
+ return 0;
++}
+
+-free_socket:
+- sockfd_put(sock);
+- return err;
++/* called with host lock */
++static void
++iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_mtask->xmstate = XMSTATE_IMM_HDR;
++ tcp_mtask->sent = 0;
++
++ if (mtask->data_count)
++ iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
++ mtask->data_count);
+ }
+
+ static int
+@@ -1706,8 +1927,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1937,18 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
++ (void***)&tcp_ctask->r2ts,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1957,12 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1973,12 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ }
+
+@@ -1769,6 +1994,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ switch(param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
++ if (conn->hdrdgst_en)
++ tcp_conn->hdr_size += sizeof(__u32);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+@@ -1777,12 +2005,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%d", &value);
+- if (value <= 0 || !is_power_of_2(value))
+- return -EINVAL;
+- if (session->max_r2t == value)
++ if (session->max_r2t == roundup_pow_of_two(value))
+ break;
+ iscsi_r2tpool_free(session);
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ if (session->max_r2t & (session->max_r2t - 1))
++ session->max_r2t = roundup_pow_of_two(session->max_r2t);
+ if (iscsi_r2tpool_alloc(session))
+ return -ENOMEM;
+ break;
+@@ -1798,18 +2026,41 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct inet_sock *inet;
++ struct ipv6_pinfo *np;
++ struct sock *sk;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_CONN_PORT:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%hu\n", conn->portal_port);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ inet = inet_sk(tcp_conn->sock->sk);
++ len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%s\n", conn->portal_address);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ sk = tcp_conn->sock->sk;
++ if (sk->sk_family == PF_INET) {
++ inet = inet_sk(sk);
++ len = sprintf(buf, NIPQUAD_FMT "\n",
++ NIPQUAD(inet->daddr));
++ } else {
++ np = inet6_sk(sk);
++ len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
++ }
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+@@ -1843,93 +2094,65 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ ctask->hdr = &tcp_ctask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ mtask->hdr = &tcp_mtask->hdr;
++ }
++
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
+-
+-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+-{
+- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
+- blk_queue_dma_alignment(sdev->request_queue, 0);
+- return 0;
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static struct scsi_host_template iscsi_sht = {
+- .module = THIS_MODULE,
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+- .sg_tablesize = 4096,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
++ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+- .slave_configure = iscsi_tcp_slave_configure,
+ .proc_name = "iscsi_tcp",
+ .this_id = -1,
+ };
+@@ -1956,16 +2179,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+- ISCSI_HOST_INITIATOR_NAME |
+- ISCSI_HOST_NETDEV_NAME,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1978,15 +2197,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_cmd_init,
++ .init_mgmt_task = iscsi_tcp_mgmt_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2217,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..3273683 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -24,61 +24,68 @@
+
+ #include <scsi/libiscsi.h>
+
++/* Socket's Receive state machine */
++#define IN_PROGRESS_WAIT_HEADER 0x0
++#define IN_PROGRESS_HEADER_GATHER 0x1
++#define IN_PROGRESS_DATA_RECV 0x2
++#define IN_PROGRESS_DDIGEST_RECV 0x3
++
++/* xmit state machine */
++#define XMSTATE_IDLE 0x0
++#define XMSTATE_R_HDR 0x1
++#define XMSTATE_W_HDR 0x2
++#define XMSTATE_IMM_HDR 0x4
++#define XMSTATE_IMM_DATA 0x8
++#define XMSTATE_UNS_INIT 0x10
++#define XMSTATE_UNS_HDR 0x20
++#define XMSTATE_UNS_DATA 0x40
++#define XMSTATE_SOL_HDR 0x80
++#define XMSTATE_SOL_DATA 0x100
++#define XMSTATE_W_PAD 0x200
++#define XMSTATE_W_RESEND_PAD 0x400
++#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
++
++#define ISCSI_PAD_LEN 4
++#define ISCSI_SG_TABLESIZE SG_ALL
++#define ISCSI_TCP_MAX_CMD_LEN 16
++
+ struct crypto_hash;
+ struct socket;
+-struct iscsi_tcp_conn;
+-struct iscsi_segment;
+-
+-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+- struct iscsi_segment *);
+-
+-struct iscsi_segment {
+- unsigned char *data;
+- unsigned int size;
+- unsigned int copied;
+- unsigned int total_size;
+- unsigned int total_copied;
+-
+- struct hash_desc *hash;
+- unsigned char recv_digest[ISCSI_DIGEST_SIZE];
+- unsigned char digest[ISCSI_DIGEST_SIZE];
+- unsigned int digest_len;
+-
+- struct scatterlist *sg;
+- void *sg_mapped;
+- unsigned int sg_offset;
+-
+- iscsi_segment_done_fn_t *done;
+-};
+
+ /* Socket connection recieve helper */
+ struct iscsi_tcp_recv {
+ struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+-
+- /* Allocate buffer for BHS + AHS */
+- uint32_t hdr_buf[64];
++ struct sk_buff *skb;
++ int offset;
++ int len;
++ int hdr_offset;
++ int copy;
++ int copied;
++ int padding;
++ struct iscsi_cmd_task *ctask; /* current cmd in progress */
+
+ /* copied and flipped values */
+ int datalen;
+-};
+-
+-/* Socket connection send helper */
+-struct iscsi_tcp_send {
+- struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+- struct iscsi_segment data_segment;
++ int datadgst;
++ char zero_copy_hdr;
+ };
+
+ struct iscsi_tcp_conn {
+ struct iscsi_conn *iscsi_conn;
+ struct socket *sock;
++ struct iscsi_hdr hdr; /* header placeholder */
++ char hdrext[4*sizeof(__u16) +
++ sizeof(__u32)];
++ int data_copied;
+ int stop_stage; /* conn_stop() flag: *
+ * stop to recover, *
+ * stop to terminate */
++ /* iSCSI connection-wide sequencing */
++ int hdr_size; /* PDU header size */
++
+ /* control data */
+ struct iscsi_tcp_recv in; /* TCP receive context */
+- struct iscsi_tcp_send out; /* TCP send context */
++ int in_progress; /* connection state machine */
+
+ /* old values for socket callbacks */
+ void (*old_data_ready)(struct sock *, int);
+@@ -93,14 +100,29 @@ struct iscsi_tcp_conn {
+ uint32_t sendpage_failures_cnt;
+ uint32_t discontiguous_hdr_cnt;
+
+- int error;
+-
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ };
+
++struct iscsi_buf {
++ struct scatterlist sg;
++ unsigned int sent;
++ char use_sendmsg;
++};
++
+ struct iscsi_data_task {
+ struct iscsi_data hdr; /* PDU */
+- char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ struct iscsi_buf digestbuf; /* digest buffer */
++ uint32_t digest; /* data digest */
++};
++
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ int xmstate; /* mgmt xmit progress */
++ struct iscsi_buf headbuf; /* header buffer */
++ struct iscsi_buf sendbuf; /* in progress buffer */
++ int sent;
+ };
+
+ struct iscsi_r2t_info {
+@@ -108,26 +130,38 @@ struct iscsi_r2t_info {
+ __be32 exp_statsn; /* copied from R2T */
+ uint32_t data_length; /* copied from R2T */
+ uint32_t data_offset; /* copied from R2T */
++ struct iscsi_buf headbuf; /* Data-Out Header Buffer */
++ struct iscsi_buf sendbuf; /* Data-Out in progress buffer */
+ int sent; /* R2T sequence progress */
+ int data_count; /* DATA-Out payload progress */
++ struct scatterlist *sg; /* per-R2T SG list */
+ int solicit_datasn;
+- struct iscsi_data_task dtask; /* Data-Out header buf */
++ struct iscsi_data_task dtask; /* which data task */
+ };
+
+-struct iscsi_tcp_task {
+- struct iscsi_hdr_buff {
+- struct iscsi_cmd cmd_hdr;
+- char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+- ISCSI_DIGEST_SIZE];
+- } hdr;
+-
++struct iscsi_tcp_cmd_task {
++ struct iscsi_cmd hdr;
++ char hdrext[4*sizeof(__u16)+ /* AHS */
++ sizeof(__u32)]; /* HeaderDigest */
++ char pad[ISCSI_PAD_LEN];
++ int pad_count; /* padded bytes */
++ struct iscsi_buf headbuf; /* header buf (xmit) */
++ struct iscsi_buf sendbuf; /* in progress buffer */
++ int xmstate; /* xmit state machine */
+ int sent;
+- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
++ struct scatterlist *sg; /* per-cmd SG list */
++ struct scatterlist *bad_sg; /* assert statement */
++ int sg_count; /* SG's to process */
++ uint32_t exp_r2tsn;
+ int data_offset;
+- struct iscsi_r2t_info *r2t; /* in progress R2T */
+- struct iscsi_pool r2tpool;
++ struct iscsi_r2t_info *r2t; /* in progress R2T */
++ struct iscsi_queue r2tpool;
+ struct kfifo *r2tqueue;
+- struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
++ struct iscsi_r2t_info **r2ts;
++ int digest_count;
++ uint32_t immdigest; /* for imm data */
++ struct iscsi_buf immbuf; /* for imm data digest */
++ struct iscsi_data_task unsol_dtask; /* unsol data task */
+ };
+
+ #endif /* ISCSI_H */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..3f5b9b4 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -22,9 +22,9 @@
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+ #include <linux/types.h>
++#include <linux/mutex.h>
+ #include <linux/kfifo.h>
+ #include <linux/delay.h>
+-#include <linux/log2.h>
+ #include <asm/unaligned.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -38,211 +38,92 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-#define SNA32_CHECK 2147483648UL
+-
+-static int iscsi_sna_lt(u32 n1, u32 n2)
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
+ {
+- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
+ }
++EXPORT_SYMBOL_GPL(class_to_transport_session);
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-static int iscsi_sna_lte(u32 n1, u32 n2)
+-{
+- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+-}
++#define INVALID_SN_DELTA 0xffff
+
+-void
+-iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
++int
++iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ {
+ uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
+ uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
+
+- /*
+- * standard specifies this check for when to update expected and
+- * max sequence numbers
+- */
+- if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+- return;
+-
+- if (exp_cmdsn != session->exp_cmdsn &&
+- !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
++ if (max_cmdsn < exp_cmdsn -1 &&
++ max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
++ return ISCSI_ERR_MAX_CMDSN;
++ if (max_cmdsn > session->max_cmdsn ||
++ max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
++ session->max_cmdsn = max_cmdsn;
++ if (exp_cmdsn > session->exp_cmdsn ||
++ exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ session->exp_cmdsn = exp_cmdsn;
+
+- if (max_cmdsn != session->max_cmdsn &&
+- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+- session->max_cmdsn = max_cmdsn;
+- /*
+- * if the window closed with IO queued, then kick the
+- * xmit thread
+- */
+- if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
+- }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
++EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
+-{
+- unsigned exp_len = task->hdr_len + len;
+-
+- if (exp_len > task->hdr_max) {
+- WARN_ON(1);
+- return -EINVAL;
+- }
+-
+- WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
+- return 0;
+-}
+-
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
+- int rc;
+-
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
+- if (rc)
+- return rc;
+- hdr->opcode = ISCSI_OP_SCSI_CMD;
+- hdr->flags = ISCSI_ATTR_SIMPLE;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
+- hdr->cmdsn = cpu_to_be32(session->cmdsn);
+- session->cmdsn++;
+- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
+-
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ hdr->opcode = ISCSI_OP_SCSI_CMD;
++ hdr->flags = ISCSI_ATTR_SIMPLE;
++ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
++ hdr->itt = build_itt(ctask->itt, conn->id, session->age);
++ hdr->data_length = cpu_to_be32(sc->request_bufflen);
++ hdr->cmdsn = cpu_to_be32(session->cmdsn);
++ session->cmdsn++;
++ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
++
++ ctask->data_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,318 +139,117 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->imm_count = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (ctask->total_length >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(ctask->total_length,
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(ctask->hdr->dlength, ctask->imm_count);
+ } else
+- zero_data(hdr->dlength);
++ zero_data(ctask->hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min(session->first_burst,
++ ctask->total_length) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
++ ctask->datasn = 0;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+- /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
+-
+- WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+- hdrlength /= ISCSI_PAD_LEN;
+-
+- WARN_ON(hdrlength >= 256);
+- hdr->hlength = hdrlength & 0xFF;
+-
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
+-
+ conn->scsicmd_pdus_cnt++;
+- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+- return 0;
+ }
++EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
++ struct iscsi_session *session = ctask->conn->session;
++ struct scsi_cmnd *sc = ctask->sc;
+
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+-
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+
+-void __iscsi_get_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- atomic_inc(&task->refcount);
++ atomic_inc(&ctask->refcount);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_get_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+-/*
+- * session lock must be held
+- */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
+- int err)
++static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct scsi_cmnd *sc;
+-
+- sc = task->sc;
+- if (!sc)
+- return;
+-
+- if (task->state == ISCSI_TASK_PENDING)
+- /*
+- * cmd never made it to the xmit thread, so we should not count
+- * the cmd in the sequencing
+- */
+- conn->session->queued_cmdsn--;
+- else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
+-
+- sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_put_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
+-
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
+-}
+-
+-static struct iscsi_task *
+-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
+-
+- if (session->state == ISCSI_STATE_TERMINATE)
+- return NULL;
+-
+- if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
+- hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- /*
+- * Login and Text are sent serially, in
+- * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
+- */
+- task = conn->login_task;
+- else {
+- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+-
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
+- return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+- }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+-
+- if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
+- } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
+-
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
+-}
+-
+-int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_session *session = conn->session;
+- int err = 0;
+-
+- spin_lock_bh(&session->lock);
+- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+- err = -EPERM;
+- spin_unlock_bh(&session->lock);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+-
+ /**
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+ * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
+ * then completes the command and task.
+ **/
+-static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
+- int datalen)
++static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ struct iscsi_cmd_task *ctask, char *data,
++ int datalen)
+ {
++ int rc;
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc) {
++ sc->result = DID_ERROR << 16;
++ goto out;
++ }
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+@@ -584,14 +264,13 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+
+ if (datalen < 2) {
+ invalid_datalen:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Got CHECK_CONDITION but invalid data "
+- "buffer size of %d\n", datalen);
++ printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
++ "invalid data buffer size of %d\n", datalen);
+ sc->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,36 +280,28 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ if (sc->sc_data_direction == DMA_TO_DEVICE)
++ goto out;
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+- ISCSI_FLAG_CMD_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+- if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+- scsi_set_resid(sc, res_count);
++ if (res_count > 0 && res_count <= sc->request_bufflen)
++ sc->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++ else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
++ return rc;
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -640,42 +311,18 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+- if (conn->tmf_state != TMF_QUEUED)
++ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+- conn->tmf_state = TMF_SUCCESS;
++ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+- conn->tmf_state = TMF_NOT_FOUND;
++ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+- conn->tmf_state = TMF_FAILED;
++ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+ }
+
+-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+-{
+- struct iscsi_nopout hdr;
+- struct iscsi_task *task;
+-
+- if (!rhdr && conn->ping_task)
+- return;
+-
+- memset(&hdr, 0, sizeof(struct iscsi_nopout));
+- hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+- hdr.flags = ISCSI_FLAG_CMD_FINAL;
+-
+- if (rhdr) {
+- memcpy(hdr.lun, rhdr->lun, 8);
+- hdr.ttt = rhdr->ttt;
+- hdr.itt = RESERVED_ITT;
+- } else
+- hdr.ttt = RESERVED_ITT;
+-
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
+- iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+-}
+-
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+ {
+@@ -692,41 +339,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+ memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+ itt = get_itt(rejected_pdu.itt);
+- iscsi_conn_printk(KERN_ERR, conn,
+- "itt 0x%x had pdu (op 0x%x) rejected "
+- "due to DataDigest error.\n", itt,
+- rejected_pdu.opcode);
++ printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
++ "due to DataDigest error.\n", itt,
++ rejected_pdu.opcode);
+ }
+ }
+ return 0;
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -742,24 +363,105 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+- conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
++
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
+
+ switch(opcode) {
+ case ISCSI_OP_NOOP_IN:
+@@ -771,7 +473,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ break;
+
+- iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
++ rc = ISCSI_ERR_CONN_FAILED;
+ break;
+ case ISCSI_OP_REJECT:
+ rc = iscsi_handle_reject(conn, hdr, data, datalen);
+@@ -785,101 +488,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
++done:
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+@@ -896,63 +508,55 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x expected "
++ "session age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
++ (conn->id << ISCSI_CID_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x, expected "
++ "CID (%x)\n", (__force u32)hdr->itt, conn->id);
++ return ISCSI_ERR_BAD_ITT;
++ }
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ printk(KERN_INFO "iscsi: dropping ctask with "
++ "itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ printk(KERN_ERR "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,55 +578,29 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
+-static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc, was_logout = 0;
+
+- /*
+- * Check for iSCSI window and take care of CmdSN wrap-around
+- */
+- if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
+- debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
+- "CmdSN %u/%u\n", session->exp_cmdsn,
+- session->max_cmdsn, session->cmdsn,
+- session->queued_cmdsn);
+- return -ENOSPC;
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
++ conn->session->state = ISCSI_STATE_IN_RECOVERY;
++ iscsi_block_session(session_to_cls(conn->session));
++ was_logout = 1;
+ }
+- return 0;
+-}
+-
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
+-{
+- struct iscsi_task *task = conn->task;
+- int rc;
+-
+- __iscsi_get_task(task);
+- spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
+- spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
+- if (!rc)
+- /* done with this task */
+- conn->task = NULL;
+- return rc;
+-}
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ if (rc)
++ return rc;
+
+-/**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
+- *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
+- */
+-void iscsi_requeue_task(struct iscsi_task *task)
+-{
+- struct iscsi_conn *conn = task->conn;
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
+
+- list_move_tail(&task->running, &conn->requeue);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ if (was_logout) {
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ return -ENODATA;
++ }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1035,94 +613,106 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+ **/
+ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ {
++ struct iscsi_transport *tt;
+ int rc = 0;
+
+- spin_lock_bh(&conn->session->lock);
+ if (unlikely(conn->suspend_tx)) {
+ debug_scsi("conn %d Tx suspended!\n", conn->id);
+- spin_unlock_bh(&conn->session->lock);
+ return -ENODATA;
+ }
+-
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
++ tt = conn->session->tt;
+
+ /*
+- * process mgmt pdus like nops before commands since we should
+- * only have one nop-out as a ping from us and targets should not
+- * overflow us with nop-ins
++ * Transmit in the following order:
++ *
++ * 1) un-finished xmit (ctask or mtask)
++ * 2) immediate control PDUs
++ * 3) write data
++ * 4) SCSI commands
++ * 5) non-immediate control PDUs
++ *
++ * No need to lock around __kfifo_get as long as
++ * there's one producer and one consumer.
+ */
+-check_mgmt:
+- while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
+
+- /* process pending command queue */
+- while (!list_empty(&conn->xmitqueue)) {
+- if (conn->tmf_state == TMF_QUEUED)
+- break;
++ BUG_ON(conn->ctask && conn->mtask);
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+- continue;
+- }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ iscsi_get_ctask(conn->ctask);
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++ iscsi_put_ctask(conn->ctask);
+ if (rc)
+ goto again;
+- /*
+- * we could continuously get new task requests so
+- * we need to check the mgmt queue for nops that need to
+- * be sent to aviod starvation
+- */
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ /* done with this in-progress ctask */
++ conn->ctask = NULL;
++ }
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
+ }
+
+- while (!list_empty(&conn->requeue)) {
+- if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
+- break;
++ /* process immediate first */
++ if (unlikely(__kfifo_len(conn->immqueue))) {
++ while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
+
++ /* process command queue */
++ spin_lock_bh(&conn->session->lock);
++ while (!list_empty(&conn->xmitqueue)) {
+ /*
+- * we always do fastlogout - conn stop code will clean up.
++ * iscsi tcp may readd the task to the xmitqueue to send
++ * write data
+ */
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- break;
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ __iscsi_get_ctask(conn->ctask);
++ spin_unlock_bh(&conn->session->lock);
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
+- if (rc)
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++
++ spin_lock_bh(&conn->session->lock);
++ __iscsi_put_ctask(conn->ctask);
++ if (rc) {
++ spin_unlock_bh(&conn->session->lock);
+ goto again;
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ }
+ }
+ spin_unlock_bh(&conn->session->lock);
++ /* done with this ctask */
++ conn->ctask = NULL;
++
++ /* process the rest of the control-plane PDUs, if any */
++ if (unlikely(__kfifo_len(conn->mgmtqueue))) {
++ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
++
+ return -ENODATA;
+
+ again:
+ if (unlikely(conn->suspend_tx))
+- rc = -ENODATA;
+- spin_unlock_bh(&conn->session->lock);
++ return -ENODATA;
++
+ return rc;
+ }
+
+@@ -1134,9 +724,11 @@ static void iscsi_xmitworker(struct work_struct *work)
+ /*
+ * serialize Xmit worker on a per-connection basis.
+ */
++ mutex_lock(&conn->xmitmutex);
+ do {
+ rc = iscsi_data_xmit(conn);
+ } while (rc >= 0 || rc == -EAGAIN);
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ enum {
+@@ -1148,36 +740,25 @@ enum {
+ FAILURE_SESSION_TERMINATE,
+ FAILURE_SESSION_IN_RECOVERY,
+ FAILURE_SESSION_RECOVERY_TIMEOUT,
+- FAILURE_SESSION_LOGGING_OUT,
+- FAILURE_SESSION_NOT_READY,
+ };
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+ sc->SCp.ptr = NULL;
+
+ host = sc->device->host;
+- spin_unlock(host->host_lock);
++ session = iscsi_hostdata(host->hostdata);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
+- if (reason) {
+- sc->result = reason;
+- goto fault;
+- }
+-
+ /*
+ * ISCSI_STATE_FAILED is a temp. state. The recovery
+ * code will decide what is best to do with command queued
+@@ -1191,95 +772,77 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ * be entering our queuecommand while a block is starting
+ * up because the block code is not locked)
+ */
+- switch (session->state) {
+- case ISCSI_STATE_IN_RECOVERY:
++ if (session->state == ISCSI_STATE_IN_RECOVERY) {
+ reason = FAILURE_SESSION_IN_RECOVERY;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_LOGGING_OUT:
+- reason = FAILURE_SESSION_LOGGING_OUT;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_RECOVERY_FAILED:
++ goto reject;
++ }
++
++ if (session->state == ISCSI_STATE_RECOVERY_FAILED)
+ reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- case ISCSI_STATE_TERMINATE:
++ else if (session->state == ISCSI_STATE_TERMINATE)
+ reason = FAILURE_SESSION_TERMINATE;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- default:
++ else
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+- }
+ goto fault;
+ }
+
++ /*
++ * Check for iSCSI window and take care of CmdSN wrap-around
++ */
++ if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
++ reason = FAILURE_WINDOW_CLOSED;
++ goto reject;
++ }
++
+ conn = session->leadconn;
+ if (!conn) {
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+ goto fault;
+ }
+
+- if (iscsi_check_cmdsn_window_closed(conn)) {
+- reason = FAILURE_WINDOW_CLOSED;
+- goto reject;
+- }
+-
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
+-
+- session->queued_cmdsn++;
++ sc->SCp.ptr = (char *)ctask;
++
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->mtask = NULL;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++ ctask->total_length = sc->request_bufflen;
++ iscsi_prep_scsi_cmd_pdu(ctask);
++
++ session->tt->init_cmd_task(ctask);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
++ debug_scsi(
++ "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
++ "win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+- spin_lock(host->host_lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ return 0;
+
+ reject:
+ spin_unlock(&session->lock);
+ debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
+- spin_lock(host->host_lock);
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ fault:
+ spin_unlock(&session->lock);
+- debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
+- spin_lock(host->host_lock);
++ printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
++ sc->cmnd[0], reason);
++ sc->result = (DID_NO_CONNECT << 16);
++ sc->resid = sc->request_bufflen;
++ sc->scsi_done(sc);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_queuecommand);
+@@ -1293,15 +856,106 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
++static int
++iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++ struct iscsi_mgmt_task *mtask;
++
++ spin_lock_bh(&session->lock);
++ if (session->state == ISCSI_STATE_TERMINATE) {
++ spin_unlock_bh(&session->lock);
++ return -EPERM;
++ }
++ if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
++ hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ /*
++ * Login and Text are sent serially, in
++ * request-followed-by-response sequence.
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
++ */
++ mtask = conn->login_mtask;
++ else {
++ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
++ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
++
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*))) {
++ spin_unlock_bh(&session->lock);
++ return -ENOSPC;
++ }
++ }
++
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
++ session->cmdsn++;
++ } else
++ /* do not advance CmdSN */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++
++ if (data_size) {
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
++ } else
++ mtask->data_count = 0;
++
++ INIT_LIST_HEAD(&mtask->running);
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask, data, data_size);
++ spin_unlock_bh(&session->lock);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode, hdr->itt, data_size);
++
++ /*
++ * since send_pdu() could be called at least from two contexts,
++ * we need to serialize __kfifo_put, so we don't have to take
++ * additional lock on fast data-path
++ */
++ if (hdr->opcode & ISCSI_OP_IMMEDIATE)
++ __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
++ else
++ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
++
++ scsi_queue_work(session->host, &conn->xmitwork);
++ return 0;
++}
++
++int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_conn *conn = cls_conn->dd_data;
++ int rc;
++
++ mutex_lock(&conn->xmitmutex);
++ rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
++ mutex_unlock(&conn->xmitmutex);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
++
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
++ struct iscsi_conn *conn = session->leadconn;
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ session->state = ISCSI_STATE_RECOVERY_FAILED;
+- if (session->leadconn)
+- wake_up(&session->leadconn->ehwait);
++ if (conn)
++ wake_up(&conn->ehwait);
+ }
+ spin_unlock_bh(&session->lock);
+ }
+@@ -1309,32 +963,33 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
++ int fail_session = 0;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
++ if (sc->SCp.phase == session->age) {
++ debug_scsi("failing connection CID %d due to SCSI host reset\n",
++ conn->id);
++ fail_session = 1;
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ /*
+ * we drop the lock here but the leadconn cannot be destroyed while
+ * we are in the scsi eh
+ */
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ if (fail_session)
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+ debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ wait_event_interruptible(conn->ehwait,
+@@ -1344,717 +999,472 @@ failed:
+ if (signal_pending(current))
+ flush_signals(current);
+
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+- iscsi_session_printk(KERN_INFO, session,
+- "host reset succeeded\n");
++ printk(KERN_INFO "iscsi: host reset succeeded\n");
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ return SUCCESS;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+
+-static void iscsi_tmf_timedout(unsigned long data)
++static void iscsi_tmabort_timedout(unsigned long data)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
++ struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&session->lock);
+- if (conn->tmf_state == TMF_QUEUED) {
+- conn->tmf_state = TMF_TIMEDOUT;
+- debug_scsi("tmf timedout\n");
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmabort_state = TMABORT_TIMEDOUT;
++ debug_scsi("tmabort timedout [sc %p itt 0x%x]\n",
++ ctask->sc, ctask->itt);
+ /* unblock eh_abort() */
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock(&session->lock);
+ }
+
+-static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+- struct iscsi_tm *hdr, int age,
+- int timeout)
++/* must be called with conn->xmitmutex held */
++static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
++ struct iscsi_cmd_task *ctask)
+ {
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_tm *hdr = &conn->tmhdr;
++ int rc;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+- NULL, 0);
+- if (!task) {
+- spin_unlock_bh(&session->lock);
++ /*
++ * The ctask timed out but the session is OK; TMF requests must be serialized.
++ */
++ memset(hdr, 0, sizeof(struct iscsi_tm));
++ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
++ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
++ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
++
++ rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
++ NULL, 0);
++ if (rc) {
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- spin_lock_bh(&session->lock);
+- debug_scsi("tmf exec failure\n");
+- return -EPERM;
++ debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
++ rc);
++ return rc;
+ }
+- conn->tmfcmd_pdus_cnt++;
+- conn->tmf_timer.expires = timeout * HZ + jiffies;
+- conn->tmf_timer.function = iscsi_tmf_timedout;
+- conn->tmf_timer.data = (unsigned long)conn;
+- add_timer(&conn->tmf_timer);
+- debug_scsi("tmf set timeout\n");
+
++ debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
++
++ spin_lock_bh(&session->lock);
++ ctask->mtask = (struct iscsi_mgmt_task *)
++ session->mgmt_cmds[get_itt(hdr->itt) -
++ ISCSI_MGMT_ITT_OFFSET];
++
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmfcmd_pdus_cnt++;
++ conn->tmabort_timer.expires = 10*HZ + jiffies;
++ conn->tmabort_timer.function = iscsi_tmabort_timedout;
++ conn->tmabort_timer.data = (unsigned long)ctask;
++ add_timer(&conn->tmabort_timer);
++ debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++ mutex_unlock(&conn->xmitmutex);
+
+ /*
+ * block eh thread until:
+ *
+- * 1) tmf response
+- * 2) tmf timeout
++ * 1) abort response
++ * 2) abort timeout
+ * 3) session is terminated or restarted or userspace has
+ * given up on recovery
+ */
+- wait_event_interruptible(conn->ehwait, age != session->age ||
++ wait_event_interruptible(conn->ehwait,
++ sc->SCp.phase != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN ||
+- conn->tmf_state != TMF_QUEUED);
++ conn->tmabort_state != TMABORT_INITIAL);
+ if (signal_pending(current))
+ flush_signals(current);
+- del_timer_sync(&conn->tmf_timer);
++ del_timer_sync(&conn->tmabort_timer);
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
+- if (age != session->age ||
+- session->state != ISCSI_STATE_LOGGED_IN)
+- return -ENOTCONN;
++ mutex_lock(&conn->xmitmutex);
+ return 0;
+ }
+
+ /*
+- * Fail commands. session lock held and recv side suspended and xmit
+- * thread flushed
++ * xmit mutex and session lock must be held
+ */
+-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+- int error)
++static struct iscsi_mgmt_task *
++iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+ {
+- struct iscsi_task *task, *tmp;
++ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
++ struct iscsi_mgmt_task *task;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ debug_scsi("searching %d tasks\n", nr_tasks);
+
+- /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
+- }
+- }
++ for (i = 0; i < nr_tasks; i++) {
++ __kfifo_get(fifo, (void*)&task, sizeof(void*));
++ debug_scsi("check task %u\n", task->itt);
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ if (task->itt == itt) {
++ debug_scsi("matched task\n");
++ return task;
+ }
+- }
+
+- /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
+- }
++ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
++ return NULL;
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
+-{
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+-
+-static void iscsi_start_tx(struct iscsi_conn *conn)
+-{
+- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-}
+-
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+-
+- cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("scsi cmd %p timedout\n", scmd);
+-
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN) {
+- /*
+- * We are probably in the middle of iscsi recovery so let
+- * that complete and handle the error.
+- */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_session *session = conn->session;
+
+- conn = session->leadconn;
+- if (!conn) {
+- /* In the middle of shutting down */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ if (!ctask->mtask)
++ return -EINVAL;
+
+- if (!conn->recv_timeout && !conn->ping_timeout)
+- goto done;
+- /*
+- * if the ping timedout then we are in the middle of cleaning up
+- * and can let the iscsi eh handle it
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+- (conn->ping_timeout * HZ), jiffies))
+- rc = EH_RESET_TIMER;
+- /*
+- * if we are about to check the transport then give the command
+- * more time
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+- jiffies))
+- rc = EH_RESET_TIMER;
+- /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
+- rc = EH_RESET_TIMER;
+-done:
+- spin_unlock(&session->lock);
+- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+- return rc;
++ if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
++ list_del(&ctask->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
++ sizeof(void*));
++ ctask->mtask = NULL;
++ return 0;
+ }
+
+-static void iscsi_check_transport_timeouts(unsigned long data)
++/*
++ * session lock and xmitmutex must be held
++ */
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ int err)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+- struct iscsi_session *session = conn->session;
+- unsigned long recv_timeout, next_timeout = 0, last_recv;
++ struct scsi_cmnd *sc;
+
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN)
+- goto done;
+-
+- recv_timeout = conn->recv_timeout;
+- if (!recv_timeout)
+- goto done;
+-
+- recv_timeout *= HZ;
+- last_recv = conn->last_recv;
+- if (conn->ping_task &&
+- time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+- jiffies)) {
+- iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+- "expired, last rx %lu, last ping %lu, "
+- "now %lu\n", conn->ping_timeout, last_recv,
+- conn->last_ping, jiffies);
+- spin_unlock(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ sc = ctask->sc;
++ if (!sc)
+ return;
+- }
+
+- if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+- /* send a ping to try to provoke some traffic */
+- debug_scsi("Sending nopout as ping on conn %p\n", conn);
+- iscsi_send_nopout(conn, NULL);
+- next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+- } else
+- next_timeout = last_recv + recv_timeout;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
++ iscsi_ctask_mtask_cleanup(ctask);
+
+- debug_scsi("Setting next tmo %lu\n", next_timeout);
+- mod_timer(&conn->transport_timer, next_timeout);
+-done:
+- spin_unlock(&session->lock);
+-}
+-
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+- struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ sc->result = err;
++ sc->resid = sc->request_bufflen;
++ /* release ref from queuecommand */
++ __iscsi_put_ctask(ctask);
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
+- struct iscsi_tm *hdr;
+- int rc, age;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ struct iscsi_session *session;
++ int rc;
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
+
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ conn = ctask->conn;
++ session = conn->session;
++
++ conn->eh_abort_cnt++;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
++
++ mutex_lock(&conn->xmitmutex);
++ spin_lock_bh(&session->lock);
++
+ /*
+ * If we are not logged in or we have started a new session
+ * then let the host reset code handle this
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+- sc->SCp.phase != session->age) {
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+- }
+-
+- conn = session->leadconn;
+- conn->eh_abort_cnt++;
+- age = session->age;
+-
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ if (session->state != ISCSI_STATE_LOGGED_IN ||
++ sc->SCp.phase != session->age)
++ goto failed;
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
+- goto success;
++ /* what should we do here? */
++ if (conn->ctask == ctask) {
++ printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
++ "Failing abort\n", sc, ctask->itt);
++ goto failed;
+ }
+
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto failed;
+- conn->tmf_state = TMF_QUEUED;
++ if (ctask->state == ISCSI_TASK_PENDING)
++ goto success_cleanup;
+
+- hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ conn->tmabort_state = TMABORT_INITIAL;
+
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+- rc = FAILED;
+- goto failed;
+- }
++ spin_unlock_bh(&session->lock);
++ rc = iscsi_exec_abort_task(sc, ctask);
++ spin_lock_bh(&session->lock);
+
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+- iscsi_suspend_tx(conn);
+- /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
+- */
+- spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
+- conn->tmf_state = TMF_INITIAL;
+- spin_unlock(&session->lock);
+- iscsi_start_tx(conn);
+- goto success_unlocked;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto failed_unlocked;
+- case TMF_NOT_FOUND:
+- if (!sc->SCp.ptr) {
+- conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ if (rc || sc->SCp.phase != session->age ||
++ session->state != ISCSI_STATE_LOGGED_IN)
++ goto failed;
++ iscsi_ctask_mtask_cleanup(ctask);
++
++ switch (conn->tmabort_state) {
++ case TMABORT_SUCCESS:
++ goto success_cleanup;
++ case TMABORT_NOT_FOUND:
++ if (!ctask->sc) {
++ /* ctask completed before tmf abort response */
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+- conn->tmf_state = TMF_INITIAL;
++ /* timed out or failed */
++ spin_unlock_bh(&session->lock);
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ spin_lock_bh(&session->lock);
+ goto failed;
+ }
+
+-success:
++success_cleanup:
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ spin_unlock_bh(&session->lock);
+-success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
+- mutex_unlock(&session->eh_mutex);
+- return SUCCESS;
+
+-failed:
+- spin_unlock_bh(&session->lock);
+-failed_unlocked:
+- debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+-
+-static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->rtt = RESERVED_ITT;
+-}
+-
+-int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+-{
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- struct iscsi_tm *hdr;
+- int rc = FAILED;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+-
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+- * Just check if we are not logged in. We cannot check for
+- * the phase because the reset could come from an ioctl.
++ * clean up task if aborted. we have the xmitmutex so grab
++ * the recv lock as a writer
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+- goto unlock;
+- conn = session->leadconn;
+-
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto unlock;
+- conn->tmf_state = TMF_QUEUED;
+-
+- hdr = &conn->tmhdr;
+- iscsi_prep_lun_reset_pdu(sc, hdr);
+-
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+- session->lu_reset_timeout)) {
+- rc = FAILED;
+- goto unlock;
+- }
+-
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- break;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto done;
+- default:
+- conn->tmf_state = TMF_INITIAL;
+- goto unlock;
+- }
+-
+- rc = SUCCESS;
+- spin_unlock_bh(&session->lock);
+-
+- iscsi_suspend_tx(conn);
+-
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_all_commands(conn, sc->device->lun, DID_ERROR);
+- conn->tmf_state = TMF_INITIAL;
++ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+- iscsi_start_tx(conn);
+- goto done;
++success_rel_mutex:
++ mutex_unlock(&conn->xmitmutex);
++ return SUCCESS;
+
+-unlock:
++failed:
+ spin_unlock_bh(&session->lock);
+-done:
+- debug_scsi("iscsi_eh_device_reset %s\n",
+- rc == SUCCESS ? "SUCCESS" : "FAILED");
+- mutex_unlock(&session->eh_mutex);
+- return rc;
++ mutex_unlock(&conn->xmitmutex);
++
++ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ return FAILED;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
++EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+
+-/*
+- * Pre-allocate a pool of @max items of @item_size. By default, the pool
+- * should be accessed via kfifo_{get,put} on q->queue.
+- * Optionally, the caller can obtain the array of object pointers
+- * by passing in a non-NULL @items pointer
+- */
+ int
+-iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
++iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
+ {
+- int i, num_arrays = 1;
++ int i;
+
+- memset(q, 0, sizeof(*q));
++ *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (*items == NULL)
++ return -ENOMEM;
+
+ q->max = max;
+-
+- /* If the user passed an items pointer, he wants a copy of
+- * the array. */
+- if (items)
+- num_arrays++;
+- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+- if (q->pool == NULL)
+- goto enomem;
++ q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (q->pool == NULL) {
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+ GFP_KERNEL, NULL);
+- if (q->queue == ERR_PTR(-ENOMEM))
+- goto enomem;
++ if (q->queue == ERR_PTR(-ENOMEM)) {
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ for (i = 0; i < max; i++) {
+- q->pool[i] = kzalloc(item_size, GFP_KERNEL);
++ q->pool[i] = kmalloc(item_size, GFP_KERNEL);
+ if (q->pool[i] == NULL) {
+- q->max = i;
+- goto enomem;
++ int j;
++
++ for (j = 0; j < i; j++)
++ kfree(q->pool[j]);
++
++ kfifo_free(q->queue);
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
+ }
++ memset(q->pool[i], 0, item_size);
++ (*items)[i] = q->pool[i];
+ __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ }
+-
+- if (items) {
+- *items = q->pool + max;
+- memcpy(*items, q->pool, max * sizeof(void *));
+- }
+-
+ return 0;
+-
+-enomem:
+- iscsi_pool_free(q);
+- return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_init);
+
+-void iscsi_pool_free(struct iscsi_pool *q)
++void iscsi_pool_free(struct iscsi_queue *q, void **items)
+ {
+ int i;
+
+ for (i = 0; i < q->max; i++)
+- kfree(q->pool[i]);
+- if (q->pool)
+- kfree(q->pool);
++ kfree(items[i]);
++ kfree(q->pool);
++ kfree(items);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: allocated host number
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total number (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
+- }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
+- }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ *hostno = shost->host_no;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
+- return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+- session->fast_abort = 1;
+- session->lu_reset_timeout = 15;
+- session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
+- session->queued_cmdsn = session->cmdsn = initial_cmdsn;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = ISCSI_XMIT_CMDS_MAX;
++ session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+- mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
++
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
+
+- if (iscsi_add_session(cls_session, id))
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
+- iscsi_pool_free(&session->cmdpool);
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++mgmtpool_alloc_fail:
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
+- iscsi_pool_free(&session->cmdpool);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+
+- kfree(session->password);
+- kfree(session->password_in);
+- kfree(session->username);
+- kfree(session->username_in);
+ kfree(session->targetname);
+- kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+ iscsi_destroy_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,68 +1472,74 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ conn->id = conn_idx;
+ conn->exp_statsn = 0;
+- conn->tmf_state = TMF_INITIAL;
+-
+- init_timer(&conn->transport_timer);
+- conn->transport_timer.data = (unsigned long)conn;
+- conn->transport_timer.function = iscsi_check_transport_timeouts;
+-
++ conn->tmabort_state = TMABORT_INITIAL;
+ INIT_LIST_HEAD(&conn->run_list);
+ INIT_LIST_HEAD(&conn->mgmt_run_list);
+- INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->xmitqueue);
+- INIT_LIST_HEAD(&conn->requeue);
++
++ /* initialize general immediate & non-immediate PDU commands queue */
++ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->immqueue == ERR_PTR(-ENOMEM))
++ goto immqueue_alloc_fail;
++
++ conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
++ goto mgmtqueue_alloc_fail;
++
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+- init_timer(&conn->tmf_timer);
++ init_timer(&conn->tmabort_timer);
++ mutex_init(&conn->xmitmutex);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
++ kfifo_free(conn->mgmtqueue);
++mgmtqueue_alloc_fail:
++ kfifo_free(conn->immqueue);
++immqueue_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2142,7 +1558,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+ unsigned long flags;
+
+- del_timer_sync(&conn->transport_timer);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ mutex_lock(&conn->xmitmutex);
+
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+@@ -2155,6 +1572,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
++ mutex_unlock(&conn->xmitmutex);
++
+ /*
+ * Block until all in-progress commands for this connection
+ * time out or fail.
+@@ -2167,10 +1586,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ msleep_interruptible(500);
+- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+- "host_busy %d host_failed %d\n",
+- session->host->host_busy,
+- session->host->host_failed);
++ printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
++ "host_failed %d\n", session->host->host_busy,
++ session->host->host_failed);
+ /*
+ * force eh_abort() to unblock
+ */
+@@ -2178,17 +1596,23 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- iscsi_suspend_tx(conn);
++ scsi_flush_work(session->host);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+- if (session->leadconn == conn)
++ if (session->leadconn == conn) {
+ session->leadconn = NULL;
++ /* no connections exist; reset sequencing */
++ session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
++ }
+ spin_unlock_bh(&session->lock);
+
++ kfifo_free(conn->immqueue);
++ kfifo_free(conn->mgmtqueue);
++
+ iscsi_destroy_conn(cls_conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+@@ -2199,41 +1623,21 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+
+ if (!session) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "can't start unbound connection\n");
++ printk(KERN_ERR "iscsi: can't start unbound connection\n");
+ return -EPERM;
+ }
+
+ if ((session->imm_data_en || !session->initial_r2t_en) &&
+ session->first_burst > session->max_burst) {
+- iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+- "first_burst %d max_burst %d\n",
+- session->first_burst, session->max_burst);
++ printk("iscsi: invalid burst lengths: "
++ "first_burst %d max_burst %d\n",
++ session->first_burst, session->max_burst);
+ return -EINVAL;
+ }
+
+- if (conn->ping_timeout && !conn->recv_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+- "zero. Using 5 seconds\n.");
+- conn->recv_timeout = 5;
+- }
+-
+- if (conn->recv_timeout && !conn->ping_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+- "zero. Using 5 seconds.\n");
+- conn->ping_timeout = 5;
+- }
+-
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_STARTED;
+ session->state = ISCSI_STATE_LOGGED_IN;
+- session->queued_cmdsn = session->cmdsn;
+-
+- conn->last_recv = jiffies;
+- conn->last_ping = jiffies;
+- if (conn->recv_timeout && conn->ping_timeout)
+- mod_timer(&conn->transport_timer,
+- jiffies + (conn->recv_timeout * HZ));
+
+ switch(conn->stop_stage) {
+ case STOP_CONN_RECOVER:
+@@ -2242,11 +1646,13 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ * commands after successful recovery
+ */
+ conn->stop_stage = 0;
+- conn->tmf_state = TMF_INITIAL;
++ conn->tmabort_state = TMABORT_INITIAL;
+ session->age++;
+- if (session->age == 16)
+- session->age = 0;
+- break;
++ spin_unlock_bh(&session->lock);
++
++ iscsi_unblock_session(session_to_cls(session));
++ wake_up(&conn->ehwait);
++ return 0;
+ case STOP_CONN_TERM:
+ conn->stop_stage = 0;
+ break;
+@@ -2255,8 +1661,6 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
+- wake_up(&conn->ehwait);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+@@ -2264,23 +1668,52 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
++ __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
++ if (mtask == conn->login_mtask)
++ continue;
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ list_del(&mtask->running);
++
++ if (mtask == conn->login_mtask)
++ continue;
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
++ }
++
++ conn->mtask = NULL;
++}
++
++/* Fail commands. The xmitmutex and session lock must be held and the recv side suspended */
++static void fail_all_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_cmd_task *ctask, *tmp;
++
++ /* flush pending */
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
++ ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
++ }
++
++ /* fail all other running */
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ debug_scsi("failing in progress sc %p itt 0x%x\n",
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+
+- conn->task = NULL;
++ conn->ctask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2288,13 +1721,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ {
+ int old_stop_stage;
+
+- del_timer_sync(&conn->transport_timer);
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (conn->stop_stage == STOP_CONN_TERM) {
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return;
+ }
+
+@@ -2311,9 +1740,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
+ conn->c_stage = ISCSI_CONN_STOPPED;
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ spin_unlock_bh(&session->lock);
+
+- iscsi_suspend_tx(conn);
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
++ mutex_lock(&conn->xmitmutex);
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +1760,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2334,11 +1768,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ * flush queues.
+ */
+ spin_lock_bh(&session->lock);
+- fail_all_commands(conn, -1,
+- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
++ fail_all_commands(conn);
+ flush_control_queues(session, conn);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+@@ -2352,8 +1786,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ iscsi_start_session_recovery(session, conn, flag);
+ break;
+ default:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid stop flag %d\n", flag);
++ printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+@@ -2361,7 +1794,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2387,21 +1820,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ uint32_t value;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- sscanf(buf, "%d", &session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- sscanf(buf, "%d", &session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- sscanf(buf, "%d", &session->lu_reset_timeout);
+- break;
+- case ISCSI_PARAM_PING_TMO:
+- sscanf(buf, "%d", &conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- sscanf(buf, "%d", &conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ sscanf(buf, "%d", &conn->max_recv_dlength);
+ break;
+@@ -2449,30 +1867,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ case ISCSI_PARAM_EXP_STATSN:
+ sscanf(buf, "%u", &conn->exp_statsn);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- kfree(session->username);
+- session->username = kstrdup(buf, GFP_KERNEL);
+- if (!session->username)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- kfree(session->username_in);
+- session->username_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->username_in)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- kfree(session->password);
+- session->password = kstrdup(buf, GFP_KERNEL);
+- if (!session->password)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- kfree(session->password_in);
+- session->password_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->password_in)
+- return -ENOMEM;
+- break;
+ case ISCSI_PARAM_TARGET_NAME:
+ /* this should not change between logins */
+ if (session->targetname)
+@@ -2500,14 +1894,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,19 +1905,11 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- len = sprintf(buf, "%d\n", session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- len = sprintf(buf, "%d\n", session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+- break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ break;
+@@ -2562,27 +1940,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_TPGT:
+ len = sprintf(buf, "%d\n", session->tpgt);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- len = sprintf(buf, "%s\n", session->username);
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- len = sprintf(buf, "%s\n", session->username_in);
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- len = sprintf(buf, "%s\n", session->password);
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- len = sprintf(buf, "%s\n", session->password_in);
+- break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2598,12 +1955,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_PING_TMO:
+- len = sprintf(buf, "%u\n", conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- len = sprintf(buf, "%u\n", conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ break;
+@@ -2639,72 +1990,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+
+-int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+- int len;
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->netdev);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+-
+-int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf, int buflen)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+-
+ MODULE_AUTHOR("Mike Christie");
+ MODULE_DESCRIPTION("iSCSI library functions");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..caf1836 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,28 +30,26 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
+-#define ISCSI_CONN_ATTRS 13
+-#define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_SESSION_ATTRS 11
++#define ISCSI_CONN_ATTRS 11
++#define ISCSI_HOST_ATTRS 0
++#define ISCSI_TRANSPORT_VERSION "2.0-724"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+-static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ /*
+ * list of registered transports and lock that must
+@@ -64,12 +62,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +77,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,163 +115,22 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+-
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+-
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
+-
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
+- ihost->scan_workq = create_singlethread_workqueue(
+- ihost->scan_workq_name);
+- if (!ihost->scan_workq)
+- return -ENOMEM;
+- return 0;
+-}
+-
+-static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
+-{
+- struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- destroy_workqueue(ihost->scan_workq);
+ return 0;
+ }
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+ "iscsi_host",
+ iscsi_setup_host,
+- iscsi_remove_host,
++ NULL,
+ NULL);
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+@@ -340,54 +201,6 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
+ * The following functions can be used by LLDs that allocate
+ * their own scsi_hosts or by software iscsi LLDs
+ */
+-static struct {
+- int value;
+- char *name;
+-} iscsi_session_state_names[] = {
+- { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+- { ISCSI_SESSION_FAILED, "FAILED" },
+- { ISCSI_SESSION_FREE, "FREE" },
+-};
+-
+-static const char *iscsi_session_state_name(int state)
+-{
+- int i;
+- char *name = NULL;
+-
+- for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+- if (iscsi_session_state_names[i].value == state) {
+- name = iscsi_session_state_names[i].name;
+- break;
+- }
+- }
+- return name;
+-}
+-
+-int iscsi_session_chkready(struct iscsi_cls_session *session)
+-{
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_LOGGED_IN:
+- err = 0;
+- break;
+- case ISCSI_SESSION_FAILED:
+- err = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_SESSION_FREE:
+- err = DID_NO_CONNECT << 16;
+- break;
+- default:
+- err = DID_NO_CONNECT << 16;
+- break;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+-
+ static void iscsi_session_release(struct device *dev)
+ {
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
+@@ -403,114 +216,22 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+-/**
+- * iscsi_scan_finished - helper to report when running scans are done
+- * @shost: scsi host
+- * @time: scan run time
+- *
+- * This function can be used by drives like qla4xxx to report to the scsi
+- * layer when the scans it kicked off at module load time are done.
+- */
+-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+-{
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- /*
+- * qla4xxx will have kicked off some session unblocks before calling
+- * scsi_scan_host, so just wait for them to complete.
+- */
+- return !atomic_read(&ihost->nr_scans);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+-
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+-
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
+-}
+-
+-static void iscsi_scan_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session, scan_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
+-
+- iscsi_user_scan_session(&session->dev, &scan_data);
+- atomic_dec(&ihost->nr_scans);
++ return 0;
+ }
+
+ static void session_recovery_timedout(struct work_struct *work)
+@@ -518,24 +239,9 @@ static void session_recovery_timedout(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
+- unsigned long flags;
+-
+- iscsi_cls_session_printk(KERN_INFO, session,
+- "session recovery timed out after %d secs\n",
+- session->recovery_tmo);
+
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_FAILED:
+- session->state = ISCSI_SESSION_FREE;
+- break;
+- case ISCSI_SESSION_LOGGED_IN:
+- case ISCSI_SESSION_FREE:
+- /* we raced with the unblock's flush */
+- spin_unlock_irqrestore(&session->lock, flags);
+- return;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
++ dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
++ "out after %d secs\n", session->recovery_tmo);
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
+@@ -543,201 +249,73 @@ static void session_recovery_timedout(struct work_struct *work)
+ scsi_target_unblock(&session->dev);
+ }
+
+-static void __iscsi_unblock_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unblock_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /*
+- * The recovery and unblock work get run from the same workqueue,
+- * so try to cancel it if it was going to run after this unblock.
+- */
+- cancel_delayed_work(&session->recovery_work);
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_LOGGED_IN;
+- spin_unlock_irqrestore(&session->lock, flags);
+- /* start IO */
+- scsi_target_unblock(&session->dev);
+- /*
+- * Only do kernel scanning if the driver is properly hooked into
+- * the async scanning code (drivers like iscsi_tcp do login and
+- * scanning from userspace).
+- */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
+-}
+-
+-/**
+- * iscsi_unblock_session - set a session as logged in and start IO.
+- * @session: iscsi session
+- *
+- * Mark a session as ready to accept IO.
+- */
+ void iscsi_unblock_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+- /*
+- * make sure all the events have completed before tell the driver
+- * it is safe
+- */
+- flush_workqueue(iscsi_eh_timer_workq);
++ if (!cancel_delayed_work(&session->recovery_work))
++ flush_scheduled_work();
++ scsi_target_unblock(&session->dev);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
+
+-static void __iscsi_block_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- block_work);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FAILED;
+- spin_unlock_irqrestore(&session->lock, flags);
+- scsi_target_block(&session->dev);
+- queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+- session->recovery_tmo * HZ);
+-}
+-
+ void iscsi_block_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->block_work);
++ scsi_target_block(&session->dev);
++ schedule_delayed_work(&session->recovery_work,
++ session->recovery_tmo * HZ);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_block_session);
+
+-static void __iscsi_unbind_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unbind_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /* Prevent new scans and make sure scanning is not in progress */
+- mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return;
+- }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+-
+- scsi_remove_target(&session->dev);
+- iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+-}
+-
+-static int iscsi_unbind_session(struct iscsi_cls_session *session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- return queue_work(ihost->scan_workq, &session->unbind_work);
+-}
+-
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ session->transport = transport;
+ session->recovery_tmo = 120;
+- session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+- INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+- INIT_WORK(&session->block_work, __iscsi_block_session);
+- INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+- INIT_WORK(&session->scan_work, iscsi_scan_session);
+- spin_lock_init(&session->lock);
+
+ /* this is released in the dev's release function */
+ scsi_host_get(shost);
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id = target_id;
++ struct iscsi_host *ihost;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+ err = device_add(&session->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "could not register session's dev\n");
++ dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
++ "register session's dev\n");
+ goto release_host;
+ }
+ transport_register_device(&session->dev);
+
+- spin_lock_irqsave(&sesslock, flags);
+- list_add(&session->sess_list, &sesslist);
+- spin_unlock_irqrestore(&sesslock, flags);
+-
+- iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
+ return 0;
+
+ release_host:
+@@ -750,18 +328,17 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+- * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -773,65 +350,19 @@ iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_create_session);
+
+-static void iscsi_conn_release(struct device *dev)
+-{
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+- struct device *parent = conn->dev.parent;
+-
+- kfree(conn);
+- put_device(parent);
+-}
+-
+-static int iscsi_is_conn_dev(const struct device *dev)
+-{
+- return dev->release == iscsi_conn_release;
+-}
+-
+-static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+-{
+- if (!iscsi_is_conn_dev(dev))
+- return 0;
+- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+-}
+-
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&sesslock, flags);
+- list_del(&session->sess_list);
+- spin_unlock_irqrestore(&sesslock, flags);
++ struct iscsi_host *ihost = shost->shost_data;
+
+- /* make sure there are no blocks/unblocks queued */
+- flush_workqueue(iscsi_eh_timer_workq);
+- /* make sure the timedout callout is not running */
+ if (!cancel_delayed_work(&session->recovery_work))
+- flush_workqueue(iscsi_eh_timer_workq);
+- /*
+- * If we are blocked let commands flow again. The lld or iscsi
+- * layer should set up the queuecommand to fail commands.
+- * We assume that LLD will not be calling block/unblock while
+- * removing the session.
+- */
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FREE;
+- spin_unlock_irqrestore(&session->lock, flags);
++ flush_scheduled_work();
+
+- scsi_target_unblock(&session->dev);
+- /* flush running scans then delete devices */
+- flush_workqueue(ihost->scan_workq);
+- __iscsi_unbind_session(&session->unbind_work);
++ mutex_lock(&ihost->mutex);
++ list_del(&session->host_list);
++ mutex_unlock(&ihost->mutex);
+
+- /* hw iscsi may not have removed all connections from session */
+- err = device_for_each_child(&session->dev, NULL,
+- iscsi_iter_destroy_conn_fn);
+- if (err)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Could not delete all connections "
+- "for session. Error %d.\n", err);
++ scsi_remove_target(&session->dev);
+
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+@@ -840,9 +371,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
+
+ void iscsi_free_session(struct iscsi_cls_session *session)
+ {
+- iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
+ put_device(&session->dev);
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_free_session);
+
+ /**
+@@ -851,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
+ *
+ * Can be called by a LLD or iscsi_transport. There must not be
+ * any running connections.
+- */
++ **/
+ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ {
+ iscsi_remove_session(session);
+@@ -860,10 +391,23 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+
++static void iscsi_conn_release(struct device *dev)
++{
++ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
++ struct device *parent = conn->dev.parent;
++
++ kfree(conn);
++ put_device(parent);
++}
++
++static int iscsi_is_conn_dev(const struct device *dev)
++{
++ return dev->release == iscsi_conn_release;
++}
++
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -874,19 +418,19 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * for software iscsi we could be trying to preallocate a connection struct
+ * in which case there could be two connection structs and cid would be
+ * non-zero.
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+- unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -903,16 +447,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ conn->dev.release = iscsi_conn_release;
+ err = device_register(&conn->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session, "could not "
+- "register connection's dev\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
++ "connection's dev\n");
+ goto release_parent_ref;
+ }
+ transport_register_device(&conn->dev);
+-
+- spin_lock_irqsave(&connlock, flags);
+- list_add(&conn->conn_list, &connlist);
+- conn->active = 1;
+- spin_unlock_irqrestore(&connlock, flags);
+ return conn;
+
+ release_parent_ref:
+@@ -926,23 +465,17 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
+
+ /**
+ * iscsi_destroy_conn - destroy iscsi class connection
+- * @conn: iscsi cls session
++ * @session: iscsi cls session
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&connlock, flags);
+- conn->active = 0;
+- list_del(&conn->conn_list);
+- spin_unlock_irqrestore(&connlock, flags);
+-
+ transport_unregister_device(&conn->dev);
+ device_unregister(&conn->dev);
+ return 0;
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+
+ /*
+@@ -1011,8 +544,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+- "control PDU: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
++ "control PDU: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1045,8 +578,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+- "conn error (%d)\n", error);
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
++ "conn error (%d)\n", error);
+ return;
+ }
+
+@@ -1060,8 +593,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ iscsi_broadcast_skb(skb, GFP_ATOMIC);
+
+- iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+- error);
++ dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
++ error);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_error);
+
+@@ -1076,10 +609,12 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
+ int t = done ? NLMSG_DONE : type;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+- if (!skb) {
+- printk(KERN_ERR "Could not allocate skb to send reply.\n");
+- return -ENOMEM;
+- }
++ /*
++ * FIXME:
++ * user is supposed to react on iferror == -ENOMEM;
++ * see iscsi_if_rx().
++ */
++ BUG_ON(!skb);
+
+ nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+ nlh->nlmsg_flags = flags;
+@@ -1116,8 +651,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+
+ skbstat = alloc_skb(len, GFP_ATOMIC);
+ if (!skbstat) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+- "deliver stats: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
++ "deliver stats: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1152,87 +687,145 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+ }
+
+ /**
+- * iscsi_session_event - send session destr. completion event
+- * @session: iscsi class session
+- * @event: type of event
+- */
+-int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event)
++ * iscsi_if_destroy_session_done - send session destr. completion event
++ * @conn: last connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * removed a session.
++ **/
++int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ {
+ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_uevent *ev;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
++ unsigned long flags;
+ int rc, len = NLMSG_SPACE(sizeof(*ev));
+
+- priv = iscsi_if_transport_lookup(session->transport);
++ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
+ shost = iscsi_session_to_shost(session);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u\n", event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+- ev->transport_handle = iscsi_handle(session->transport);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_KEVENT_DESTROY_SESSION;
++ ev->r.d_session.host_no = shost->host_no;
++ ev->r.d_session.sid = session->sid;
+
+- ev->type = event;
+- switch (event) {
+- case ISCSI_KEVENT_DESTROY_SESSION:
+- ev->r.d_session.host_no = shost->host_no;
+- ev->r.d_session.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_CREATE_SESSION:
+- ev->r.c_session_ret.host_no = shost->host_no;
+- ev->r.c_session_ret.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_UNBIND_SESSION:
+- ev->r.unbind_session.host_no = shost->host_no;
+- ev->r.unbind_session.sid = session->sid;
+- break;
+- default:
+- iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+- "%u.\n", event);
+- kfree_skb(skb);
++ /*
++ * this will occur if the daemon is not up, so we just warn
++ * the user and when the daemon is restarted it will handle it
++ */
++ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
++ if (rc < 0)
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session destruction event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
++
++/**
++ * iscsi_if_create_session_done - send session creation completion event
++ * @conn: leading connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that its HW has
++ * created a session or an existing session is back in the logged in state.
++ **/
++int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
++{
++ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
++ struct Scsi_Host *shost;
++ struct iscsi_uevent *ev;
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++ unsigned long flags;
++ int rc, len = NLMSG_SPACE(sizeof(*ev));
++
++ priv = iscsi_if_transport_lookup(conn->transport);
++ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
++ shost = iscsi_session_to_shost(session);
++
++ skb = alloc_skb(len, GFP_KERNEL);
++ if (!skb) {
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
++ return -ENOMEM;
+ }
+
++ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
++ ev = NLMSG_DATA(nlh);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_UEVENT_CREATE_SESSION;
++ ev->r.c_session_ret.host_no = shost->host_no;
++ ev->r.c_session_ret.sid = session->sid;
++
+ /*
+ * this will occur if the daemon is not up, so we just warn
+ * the user and when the daemon is restarted it will handle it
+ */
+ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+ if (rc < 0)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u. Check iscsi daemon\n",
+- event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_session_event);
++EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ unsigned long flags;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1242,34 +835,47 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
++ unsigned long flags;
+
+ session = iscsi_session_lookup(ev->u.c_conn.sid);
+ if (!session) {
+- printk(KERN_ERR "iscsi: invalid session %d.\n",
++ printk(KERN_ERR "iscsi: invalid session %d\n",
+ ev->u.c_conn.sid);
+ return -EINVAL;
+ }
+
+ conn = transport->create_conn(session, ev->u.c_conn.cid);
+ if (!conn) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "couldn't create a new connection.");
++ printk(KERN_ERR "iscsi: couldn't create a new "
++ "connection for session %d\n",
++ session->sid);
+ return -ENOMEM;
+ }
+
+ ev->r.c_conn_ret.sid = session->sid;
+ ev->r.c_conn_ret.cid = conn->cid;
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
++
+ return 0;
+ }
+
+ static int
+ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
++ unsigned long flags;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ if (!conn)
+ return -EINVAL;
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
+
+ if (transport->destroy_conn)
+ transport->destroy_conn(conn);
+@@ -1307,7 +913,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +922,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1353,50 +947,15 @@ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+ {
+- struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+- int err;
+
+ if (!transport->tgt_dscvr)
+ return -EINVAL;
+
+- shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "target discovery could not find host no %u\n",
+- ev->u.tgt_dscvr.host_no);
+- return -ENODEV;
+- }
+-
+-
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+- ev->u.tgt_dscvr.enable, dst_addr);
+- scsi_host_put(shost);
+- return err;
+-}
+-
+-static int
+-iscsi_set_host_param(struct iscsi_transport *transport,
+- struct iscsi_uevent *ev)
+-{
+- char *data = (char*)ev + sizeof(*ev);
+- struct Scsi_Host *shost;
+- int err;
+-
+- if (!transport->set_host_param)
+- return -ENOSYS;
+-
+- shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "set_host_param could not find host no %u\n",
+- ev->u.set_host_param.host_no);
+- return -ENODEV;
+- }
+-
+- err = transport->set_host_param(shost, ev->u.set_host_param.param,
+- data, ev->u.set_host_param.len);
+- scsi_host_put(shost);
+- return err;
++ return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
++ ev->u.tgt_dscvr.host_no,
++ ev->u.tgt_dscvr.enable, dst_addr);
+ }
+
+ static int
+@@ -1408,7 +967,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
++ unsigned long flags;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,35 +981,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
++ if (session) {
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
+ transport->destroy_session(session);
+- else
+- err = -EINVAL;
+- break;
+- case ISCSI_UEVENT_UNBIND_SESSION:
+- session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
+- iscsi_unbind_session(session);
+- else
++ } else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_CREATE_CONN:
+@@ -1508,11 +1049,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ case ISCSI_UEVENT_TGT_DSCVR:
+ err = iscsi_tgt_dscvr(transport, ev);
+ break;
+- case ISCSI_UEVENT_SET_HOST_PARAM:
+- err = iscsi_set_host_param(transport, ev);
+- break;
+ default:
+- err = -ENOSYS;
++ err = -EINVAL;
+ break;
+ }
+
+@@ -1521,55 +1059,70 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ }
+
+ /*
+- * Get message from skb. Each message is processed by iscsi_if_recv_msg.
+- * Malformed skbs with wrong lengths or invalid creds are not processed.
++ * Get message from skb (based on rtnetlink_rcv_skb). Each message is
++ * processed by iscsi_if_recv_msg. Malformed skbs with wrong lengths or
++ * invalid creds are discarded silently.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1130,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1601,68 +1153,43 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
+ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
+ iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
+ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+-iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+-iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
++
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
+
+ /*
+ * iSCSI session attrs
+ */
+-#define iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr_show(param) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+- \
+- if (perm && !capable(CAP_SYS_ADMIN)) \
+- return -EACCES; \
+ return t->get_session_param(session, param, buf); \
+ }
+
+-#define iscsi_session_attr(field, param, perm) \
+- iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr(field, param) \
++ iscsi_session_attr_show(param) \
+ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
+ NULL);
+
+-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+-iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+-iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+-iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+-iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+-iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+-iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+-iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+-iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+-
+-static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
+-{
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+-}
+-static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+- NULL);
++iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
++iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
++iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
++iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
++iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
++iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
++iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
++iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
++iscsi_session_attr(erl, ISCSI_PARAM_ERL);
++iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1672,32 +1199,9 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
+ NULL)
+ iscsi_priv_session_attr(recovery_tmo, "%d");
+
+-/*
+- * iSCSI host attrs
+- */
+-#define iscsi_host_attr_show(param) \
+-static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
+-{ \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
+- struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+- return priv->iscsi_transport->get_host_param(shost, param, buf); \
+-}
+-
+-#define iscsi_host_attr(field, param) \
+- iscsi_host_attr_show(param) \
+-static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+- NULL);
+-
+-iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+-iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+-iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+-iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+-
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1209,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,15 +1217,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
+- count++; \
+- } \
+-} while (0)
+-
+-#define SETUP_HOST_RD_ATTR(field, param_flag) \
+-do { \
+- if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,34 +1307,25 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
++ priv->host_attrs[0] = NULL;
+ transport_container_register(&priv->t.host_attrs);
+
+- SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+- SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+- SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+- SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+- BUG_ON(count > ISCSI_HOST_ATTRS);
+- priv->host_attrs[count] = NULL;
+- count = 0;
+-
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+ priv->conn_cont.ac.class = &iscsi_connection_class.class;
+@@ -1856,8 +1343,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
+ SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
+ SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+- SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+- SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
+
+ BUG_ON(count > ISCSI_CONN_ATTRS);
+ priv->conn_attrs[count] = NULL;
+@@ -1879,17 +1364,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
+ SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
+ SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+- SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
+- SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
+- SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
+- SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+- SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+- SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+- SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+- SETUP_PRIV_SESSION_RD_ATTR(state);
+
+ BUG_ON(count > ISCSI_SESSION_ATTRS);
+ priv->session_attrs[count] = NULL;
+@@ -1901,9 +1376,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1404,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1425,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,29 +1437,21 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+ }
+
+- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+- if (!iscsi_eh_timer_workq)
+- goto release_nls;
+-
+ return 0;
+
+-release_nls:
+- netlink_kernel_release(nls);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -1997,12 +1459,10 @@ unregister_transport_class:
+
+ static void __exit iscsi_transport_exit(void)
+ {
+- destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..55ebf03 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -48,17 +48,12 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
+
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+- ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+- ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+ ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
+ ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
+ ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
+- ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
+- ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
+ };
+
+ enum iscsi_tgt_dscvr {
+@@ -76,15 +71,7 @@ struct iscsi_uevent {
+ /* messages u -> k */
+ struct msg_create_session {
+ uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -149,11 +136,6 @@ struct iscsi_uevent {
+ */
+ uint32_t enable;
+ } tgt_dscvr;
+- struct msg_set_host_param {
+- uint32_t host_no;
+- uint32_t param; /* enum iscsi_host_param */
+- uint32_t len;
+- } set_host_param;
+ } u;
+ union {
+ /* messages k -> u */
+@@ -166,10 +148,6 @@ struct iscsi_uevent {
+ uint32_t sid;
+ uint32_t cid;
+ } c_conn_ret;
+- struct msg_unbind_session {
+- uint32_t sid;
+- uint32_t host_no;
+- } unbind_session;
+ struct msg_recv_req {
+ uint32_t sid;
+ uint32_t cid;
+@@ -245,78 +223,42 @@ enum iscsi_param {
+ ISCSI_PARAM_CONN_PORT,
+ ISCSI_PARAM_CONN_ADDRESS,
+
+- ISCSI_PARAM_USERNAME,
+- ISCSI_PARAM_USERNAME_IN,
+- ISCSI_PARAM_PASSWORD,
+- ISCSI_PARAM_PASSWORD_IN,
+-
+- ISCSI_PARAM_FAST_ABORT,
+- ISCSI_PARAM_ABORT_TMO,
+- ISCSI_PARAM_LU_RESET_TMO,
+- ISCSI_PARAM_HOST_RESET_TMO,
+-
+- ISCSI_PARAM_PING_TMO,
+- ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
+-
+-/* iSCSI HBA params */
+-enum iscsi_host_param {
+- ISCSI_HOST_PARAM_HWADDRESS,
+- ISCSI_HOST_PARAM_INITIATOR_NAME,
+- ISCSI_HOST_PARAM_NETDEV_NAME,
+- ISCSI_HOST_PARAM_IPADDRESS,
+- ISCSI_HOST_PARAM_MAX,
+-};
+-
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+@@ -330,9 +272,6 @@ enum iscsi_host_param {
+ #define CAP_MULTI_CONN 0x40
+ #define CAP_TEXT_NEGO 0x80
+ #define CAP_MARKERS 0x100
+-#define CAP_FW_DB 0x200
+-#define CAP_SENDTARGETS_OFFLOAD 0x400
+-#define CAP_DATA_PATH_OFFLOAD 0x800
+
+ /*
+ * These flags describes reason of stop_conn() call
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index f2a2c11..8d1e4e8 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -21,16 +21,13 @@
+ #ifndef ISCSI_PROTO_H
+ #define ISCSI_PROTO_H
+
+-#include <linux/types.h>
+-#include <scsi/scsi.h>
+-
+ #define ISCSI_DRAFT20_VERSION 0x00
+
+ /* default iSCSI listen port for incoming connections */
+ #define ISCSI_LISTEN_PORT 3260
+
+ /* Padding word length */
+-#define ISCSI_PAD_LEN 4
++#define PAD_WORD_LEN 4
+
+ /*
+ * useful common(control and data pathes) macro
+@@ -46,8 +43,8 @@
+ /* initiator tags; opaque for target */
+ typedef uint32_t __bitwise__ itt_t;
+ /* below makes sense only for initiator that created this tag */
+-#define build_itt(itt, age) ((__force itt_t)\
+- ((itt) | ((age) << ISCSI_AGE_SHIFT)))
++#define build_itt(itt, id, age) ((__force itt_t)\
++ ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
+ #define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
+ #define RESERVED_ITT ((__force itt_t)0xffffffff)
+
+@@ -113,7 +110,6 @@ struct iscsi_ahs_hdr {
+
+ #define ISCSI_AHSTYPE_CDB 1
+ #define ISCSI_AHSTYPE_RLENGTH 2
+-#define ISCSI_CDB_SIZE 16
+
+ /* iSCSI PDU Header */
+ struct iscsi_cmd {
+@@ -127,7 +123,7 @@ struct iscsi_cmd {
+ __be32 data_length;
+ __be32 cmdsn;
+ __be32 exp_statsn;
+- uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */
++ uint8_t cdb[16]; /* SCSI Command Block */
+ /* Additional Data (Command Dependent) */
+ };
+
+@@ -151,15 +147,6 @@ struct iscsi_rlength_ahdr {
+ __be32 read_length;
+ };
+
+-/* Extended CDB AHS */
+-struct iscsi_ecdb_ahdr {
+- __be16 ahslength; /* CDB length - 15, including reserved byte */
+- uint8_t ahstype;
+- uint8_t reserved;
+- /* 4-byte aligned extended CDB spillover */
+- uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
+-};
+-
+ /* SCSI Response Header */
+ struct iscsi_cmd_rsp {
+ uint8_t opcode;
+@@ -613,8 +600,6 @@ struct iscsi_reject {
+ #define ISCSI_MIN_MAX_BURST_LEN 512
+ #define ISCSI_MAX_MAX_BURST_LEN 16777215
+
+-#define ISCSI_DEF_TIME2WAIT 2
+-
+ /************************* RFC 3720 End *****************************/
+
+ #endif /* ISCSI_PROTO_H */
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..ea0816d 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -51,68 +48,69 @@ struct device;
+ #define debug_scsi(fmt...)
+ #endif
+
+-#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */
++#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
++#define ISCSI_CONN_MAX 1
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+
+ /* Task Mgmt states */
+-enum {
+- TMF_INITIAL,
+- TMF_QUEUED,
+- TMF_SUCCESS,
+- TMF_FAILED,
+- TMF_TIMEDOUT,
+- TMF_NOT_FOUND,
+-};
++#define TMABORT_INITIAL 0x0
++#define TMABORT_SUCCESS 0x1
++#define TMABORT_FAILED 0x2
++#define TMABORT_TIMEDOUT 0x3
++#define TMABORT_NOT_FOUND 0x4
+
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
++#define ISCSI_CID_SHIFT 12
++#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+-#define ISCSI_ADDRESS_BUF_LEN 64
+-
+-enum {
+- /* this is the maximum possible storage for AHSs */
+- ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) +
+- sizeof(struct iscsi_rlength_ahdr),
+- ISCSI_DIGEST_SIZE = sizeof(__u32),
++struct iscsi_mgmt_task {
++ /*
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ int data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
+ };
+
+-
+ enum {
+ ISCSI_TASK_COMPLETED,
+ ISCSI_TASK_PENDING,
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+- * Because LLDs allocate their hdr differently, this is a pointer
+- * and length to that storage. It must be setup at session
+- * creation time.
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be setup at session creation time.
+ */
+ struct iscsi_cmd *hdr;
+- unsigned short hdr_max;
+- unsigned short hdr_len; /* accumulated size of hdr used */
+ int itt; /* this ITT */
++ int datasn; /* DataSN */
+
+ uint32_t unsol_datasn;
+- unsigned imm_count; /* imm-data (bytes) */
+- unsigned unsol_count; /* unsolicited (bytes)*/
++ int imm_count; /* imm-data (bytes) */
++ int unsol_count; /* unsolicited (bytes)*/
+ /* offset in unsolicited stream (bytes); */
+- unsigned unsol_offset;
+- unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
++ int unsol_offset;
++ int data_count; /* remaining Data-Out */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
++ int total_length;
+ struct iscsi_conn *conn; /* used connection */
++ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
+
+ /* state set/tested under session->lock */
+ int state;
+@@ -121,33 +119,19 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
+-{
+- return (void*)task->hdr + task->hdr_len;
+-}
+-
+-/* Connection's states */
+-enum {
+- ISCSI_CONN_INITIAL_STAGE,
+- ISCSI_CONN_STARTED,
+- ISCSI_CONN_STOPPED,
+- ISCSI_CONN_CLEANUP_WAIT,
+-};
+-
+ struct iscsi_conn {
+ struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+- struct timer_list transport_timer;
+- unsigned long last_recv;
+- unsigned long last_ping;
+- int ping_timeout;
+- int recv_timeout;
+- struct iscsi_task *ping_task;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,28 +147,35 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+- struct list_head mgmtqueue; /* mgmt (control) xmit queue */
++ struct kfifo *immqueue; /* immediate xmit queue */
++ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head mgmt_run_list; /* list of control tasks */
+ struct list_head xmitqueue; /* data-path cmd queue */
+ struct list_head run_list; /* list of cmds in progress */
+- struct list_head requeue; /* tasks needing another run */
+ struct work_struct xmitwork; /* per-conn. xmit workqueue */
++ /*
++ * serializes connection xmit, access to kfifos:
++ * xmitqueue, immqueue, mgmtqueue
++ */
++ struct mutex xmitmutex;
++
+ unsigned long suspend_tx; /* suspend Tx */
+ unsigned long suspend_rx; /* suspend Rx */
+
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+- struct timer_list tmf_timer;
+- int tmf_state; /* see TMF_INITIAL, etc.*/
++ struct timer_list tmabort_timer;
++ int tmabort_state; /* see TMABORT_INITIAL, etc.*/
+
+ /* negotiated params */
+- unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
+- unsigned max_xmit_dlength; /* target_max_recv_dsl */
++ int max_recv_dlength; /* initiator_max_recv_dsl*/
++ int max_xmit_dlength; /* target_max_recv_dsl */
+ int hdrdgst_en;
+ int datadgst_en;
+ int ifmarker_en;
+@@ -192,9 +183,6 @@ struct iscsi_conn {
+ /* values userspace uses to id a conn */
+ int persistent_port;
+ char *persistent_address;
+- /* remote portal currently connected to */
+- int portal_port;
+- char portal_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,65 +197,34 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+-struct iscsi_pool {
++struct iscsi_queue {
+ struct kfifo *queue; /* FIFO Queue */
+ void **pool; /* Pool of elements */
+ int max; /* Max number of elements */
+ };
+
+-/* Session's states */
+-enum {
+- ISCSI_STATE_FREE = 1,
+- ISCSI_STATE_LOGGED_IN,
+- ISCSI_STATE_FAILED,
+- ISCSI_STATE_TERMINATE,
+- ISCSI_STATE_IN_RECOVERY,
+- ISCSI_STATE_RECOVERY_FAILED,
+- ISCSI_STATE_LOGGING_OUT,
+-};
+-
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+- /*
+- * Syncs up the scsi eh thread with the iscsi eh thread when sending
+- * task management functions. This must be taken before the session
+- * and recv lock.
+- */
+- struct mutex eh_mutex;
+-
+ /* iSCSI session-wide sequencing */
+ uint32_t cmdsn;
+ uint32_t exp_cmdsn;
+ uint32_t max_cmdsn;
+
+- /* This tracks the reqs queued into the initiator */
+- uint32_t queued_cmdsn;
+-
+ /* configuration */
+- int abort_timeout;
+- int lu_reset_timeout;
+ int initial_r2t_en;
+- unsigned max_r2t;
++ int max_r2t;
+ int imm_data_en;
+- unsigned first_burst;
+- unsigned max_burst;
++ int first_burst;
++ int max_burst;
+ int time2wait;
+ int time2retain;
+ int pdu_inorder_en;
+ int dataseq_inorder_en;
+ int erl;
+- int fast_abort;
+ int tpgt;
+- char *username;
+- char *username_in;
+- char *password;
+- char *password_in;
+ char *targetname;
+- char *ifacename;
+- char *initiatorname;
++
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +238,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
+- struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
++ struct iscsi_queue cmdpool; /* PDU's pool */
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -303,48 +252,31 @@ struct iscsi_host {
+ extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
+ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
+ extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
+-extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
+ /*
+- * iSCSI host helpers.
+- */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+-extern int iscsi_host_set_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+-extern int iscsi_host_get_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+-
+-/*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
+-#define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,17 +285,13 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+-
+-#define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+-extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern int iscsi_check_assign_cmdsn(struct iscsi_session *,
++ struct iscsi_nopin *);
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+@@ -371,34 +299,13 @@ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
+
+ /*
+ * generic helpers
+ */
+-extern void iscsi_pool_free(struct iscsi_pool *);
+-extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+-
+-/*
+- * inline functions to deal with padding.
+- */
+-static inline unsigned int
+-iscsi_padded(unsigned int len)
+-{
+- return (len + ISCSI_PAD_LEN - 1) & ~(ISCSI_PAD_LEN - 1);
+-}
+-
+-static inline unsigned int
+-iscsi_padding(unsigned int len)
+-{
+- len &= (ISCSI_PAD_LEN - 1);
+- if (len)
+- len = ISCSI_PAD_LEN - len;
+- return len;
+-}
++extern void iscsi_pool_free(struct iscsi_queue *, void **);
++extern int iscsi_pool_init(struct iscsi_queue *, int, void ***, int);
+
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..d5c218d 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -24,17 +24,15 @@
+ #define SCSI_TRANSPORT_ISCSI_H
+
+ #include <linux/device.h>
+-#include <linux/list.h>
+-#include <linux/mutex.h>
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +56,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or the
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -84,11 +79,17 @@ struct iscsi_transport {
+ char *name;
+ unsigned int caps;
+ /* LLD sets this to indicate what values it can export to sysfs */
+- uint64_t param_mask;
+- uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ unsigned int param_mask;
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -104,25 +105,26 @@ struct iscsi_transport {
+ enum iscsi_param param, char *buf);
+ int (*get_session_param) (struct iscsi_cls_session *session,
+ enum iscsi_param param, char *buf);
+- int (*get_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+- int (*set_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+ int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
+- int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
++ int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+
+@@ -139,6 +141,13 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
+ extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+
++
++/* Connection's states */
++#define ISCSI_CONN_INITIAL_STAGE 0
++#define ISCSI_CONN_STARTED 1
++#define ISCSI_CONN_STOPPED 2
++#define ISCSI_CONN_CLEANUP_WAIT 3
++
+ struct iscsi_cls_conn {
+ struct list_head conn_list; /* item in connlist */
+ void *dd_data; /* LLD private data */
+@@ -152,34 +161,25 @@ struct iscsi_cls_conn {
+ #define iscsi_dev_to_conn(_dev) \
+ container_of(_dev, struct iscsi_cls_conn, dev)
+
+-#define iscsi_conn_to_session(_conn) \
+- iscsi_dev_to_session(_conn->dev.parent)
+-
+-/* iscsi class session state */
+-enum {
+- ISCSI_SESSION_LOGGED_IN,
+- ISCSI_SESSION_FAILED,
+- ISCSI_SESSION_FREE,
+-};
+-
+-#define ISCSI_MAX_TARGET -1
++/* Session's states */
++#define ISCSI_STATE_FREE 1
++#define ISCSI_STATE_LOGGED_IN 2
++#define ISCSI_STATE_FAILED 3
++#define ISCSI_STATE_TERMINATE 4
++#define ISCSI_STATE_IN_RECOVERY 5
++#define ISCSI_STATE_RECOVERY_FAILED 6
+
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+- spinlock_t lock;
+- struct work_struct block_work;
+- struct work_struct unblock_work;
+- struct work_struct scan_work;
+- struct work_struct unbind_work;
+
+ /* recovery fields */
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+- int state;
+ int sid; /* session id */
+ void *dd_data; /* LLD private data */
+ struct device dev; /* sysfs transport/container device */
+@@ -194,53 +194,31 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
+- atomic_t nr_scans;
++struct iscsi_host {
++ struct list_head sessions;
+ struct mutex mutex;
+- struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
+ };
+
+ /*
+ * session and connection functions that can be used by HW iSCSI LLDs
+ */
+-#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
+- dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
+-
+-#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
+- dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
+-
+-extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+-extern int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event);
++extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
++extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
++
+
+ #endif
+--
+1.5.3.8
+
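For readers comparing this against current mainline: the libiscsi.h and iscsi_proto.h hunks above restore the older initiator task tag encoding, where build_itt() folds the connection id into the tag next to the command index and the session age (see ISCSI_ITT_MASK, ISCSI_CID_SHIFT and ISCSI_AGE_SHIFT above). A small standalone sketch of that packing, with invented helper names:

    /* tag layout restored above: bits 0-11 command index,
     * bits 12-27 connection id, bits 28-31 session age */
    #include <stdint.h>
    #include <stdio.h>

    #define ISCSI_ITT_MASK   (0xfff)
    #define ISCSI_CID_SHIFT  12
    #define ISCSI_CID_MASK   (0xffff << ISCSI_CID_SHIFT)
    #define ISCSI_AGE_SHIFT  28
    #define ISCSI_AGE_MASK   (0xf << ISCSI_AGE_SHIFT)

    static uint32_t pack_itt(uint32_t idx, uint32_t cid, uint32_t age)
    {
        return idx | (cid << ISCSI_CID_SHIFT) | (age << ISCSI_AGE_SHIFT);
    }

    int main(void)
    {
        uint32_t tag = pack_itt(5, 3, 1);

        printf("idx %u cid %u age %u\n",
               tag & ISCSI_ITT_MASK,
               (tag & ISCSI_CID_MASK) >> ISCSI_CID_SHIFT,
               (tag & ISCSI_AGE_MASK) >> ISCSI_AGE_SHIFT);
        return 0;
    }

This is presumably also why iscsi_verify_itt() gains the extra itt output argument in the sync patch: the receive path hands back the masked index before the command array is touched.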
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_02_add_to_2_6_9.patch b/kernel_patches/backport/2.6.9_U6/iscsi_02_add_to_2_6_9.patch
new file mode 100644
index 0000000..1f05d95
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_02_add_to_2_6_9.patch
@@ -0,0 +1,180 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 4376840..11dfaf9 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2145,7 +2145,6 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ static struct scsi_host_template iscsi_sht = {
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index d37048c..60f5846 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1366,7 +1366,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ shost->max_lun = iscsit->max_lun;
+ shost->max_cmd_len = iscsit->max_cmd_len;
+ shost->transportt = scsit;
+- shost->transportt->create_work_queue = 1;
+ *hostno = shost->host_no;
+
+ session = iscsi_hostdata(shost->hostdata);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 8133c22..f1c68f7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -65,6 +65,8 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define cdev_to_iscsi_internal(_cdev) \
+ container_of(_cdev, struct iscsi_internal, cdev)
+
++extern int attribute_container_init(void);
++
+ static void iscsi_transport_release(struct class_device *cdev)
+ {
+ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+@@ -80,6 +82,17 @@ static struct class iscsi_transport_class = {
+ .release = iscsi_transport_release,
+ };
+
++static void iscsi_host_class_release(struct class_device *class_dev)
++{
++ struct Scsi_Host *shost = transport_class_to_shost(class_dev);
++ put_device(&shost->shost_gendev);
++}
++
++struct class iscsi_host_class = {
++ .name = "iscsi_host",
++ .release = iscsi_host_class_release,
++};
++
+ static ssize_t
+ show_transport_handle(struct class_device *cdev, char *buf)
+ {
+@@ -115,10 +128,8 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct class_device *cdev)
++static int iscsi_setup_host(struct Scsi_Host *shost)
+ {
+- struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+@@ -127,12 +138,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ return 0;
+ }
+
+-static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+- "iscsi_host",
+- iscsi_setup_host,
+- NULL,
+- NULL);
+-
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+ "iscsi_session",
+ NULL,
+@@ -216,24 +221,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_host *ihost = shost->shost_data;
+- struct iscsi_cls_session *session;
+-
+- mutex_lock(&ihost->mutex);
+- list_for_each_entry(session, &ihost->sessions, host_list) {
+- if ((channel == SCAN_WILD_CARD || channel == 0) &&
+- (id == SCAN_WILD_CARD || id == session->target_id))
+- scsi_scan_target(&session->dev, 0,
+- session->target_id, lun, 1);
+- }
+- mutex_unlock(&ihost->mutex);
+-
+- return 0;
+-}
+-
+ static void session_recovery_timedout(struct work_struct *work)
+ {
+ struct iscsi_cls_session *session =
+@@ -362,8 +349,6 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ list_del(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+- scsi_remove_target(&session->dev);
+-
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+ }
+@@ -1269,24 +1254,6 @@ static int iscsi_conn_match(struct attribute_container *cont,
+ return &priv->conn_cont.ac == cont;
+ }
+
+-static int iscsi_host_match(struct attribute_container *cont,
+- struct device *dev)
+-{
+- struct Scsi_Host *shost;
+- struct iscsi_internal *priv;
+-
+- if (!scsi_is_host_device(dev))
+- return 0;
+-
+- shost = dev_to_shost(dev);
+- if (!shost->transportt ||
+- shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
+- return 0;
+-
+- priv = to_iscsi_internal(shost->transportt);
+- return &priv->t.host_attrs.ac == cont;
+-}
+-
+ struct scsi_transport_template *
+ iscsi_register_transport(struct iscsi_transport *tt)
+ {
+@@ -1306,7 +1273,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ INIT_LIST_HEAD(&priv->list);
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+- priv->t.user_scan = iscsi_user_scan;
+
+ priv->cdev.class = &iscsi_transport_class;
+ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
+@@ -1319,12 +1285,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ goto unregister_cdev;
+
+ /* host parameters */
+- priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+- priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+- priv->t.host_attrs.ac.match = iscsi_host_match;
++ priv->t.host_attrs = &priv->host_attrs[0];
++ priv->t.host_class = &iscsi_host_class;
++ priv->t.host_setup = iscsi_setup_host;
+ priv->t.host_size = sizeof(struct iscsi_host);
+- priv->host_attrs[0] = NULL;
+- transport_container_register(&priv->t.host_attrs);
+
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+@@ -1402,7 +1366,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+
+ transport_container_unregister(&priv->conn_cont);
+ transport_container_unregister(&priv->session_cont);
+- transport_container_unregister(&priv->t.host_attrs);
+
+ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
+ class_device_unregister(&priv->cdev);
+@@ -1420,6 +1420,7 @@ static __init int iscsi_transport_init(void)
+ ISCSI_TRANSPORT_VERSION);
+
+ atomic_set(&iscsi_session_nr, 0);
++ attribute_container_init();
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+ return err;
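The iscsi_session_setup() hunk above ("session = iscsi_hostdata(shost->hostdata)") keeps the old hostdata scheme that the header sync earlier in this series restores: the first unsigned long of the Scsi_Host hostdata is a back pointer to the iscsi_cls_session, and the library session sits right behind it (hostdata_session() and iscsi_hostdata() in iscsi_if.h). A userspace-style sketch of that layout, with placeholder names and a char * cast where the kernel macro does void * arithmetic:

    #include <stdio.h>
    #include <stdlib.h>

    #define iscsi_ptr(_handle)          ((void *)(unsigned long)(_handle))
    #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)(_hostdata)))
    #define iscsi_hostdata(_hostdata)   ((char *)(_hostdata) + sizeof(unsigned long))

    int main(void)
    {
        static int cls_session;         /* stand-in for struct iscsi_cls_session */
        void *hostdata = calloc(1, sizeof(unsigned long) + 64);

        /* libiscsi stores the class-session pointer in the first word */
        *(unsigned long *)hostdata = (unsigned long)&cls_session;

        printf("class session: %p\n", hostdata_session(hostdata));
        printf("session area : %p\n", (void *)iscsi_hostdata(hostdata));
        free(hostdata);
        return 0;
    }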
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_03_add_session_wq.patch b/kernel_patches/backport/2.6.9_U6/iscsi_03_add_session_wq.patch
new file mode 100644
index 0000000..5a77c07
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_03_add_session_wq.patch
@@ -0,0 +1,76 @@
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a6f2303..5d62cc0 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -612,7 +612,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (resume_tx) {
+ iser_dbg("%ld resuming tx\n",jiffies);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ queue_work(conn->session->wq, &conn->xmitwork);
+ }
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index e8020a5..43e9128 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -828,7 +828,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+
+- scsi_queue_work(host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+
+ reject:
+@@ -928,7 +928,7 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ else
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+
+- scsi_queue_work(session->host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+ }
+
+@@ -1415,6 +1415,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ INIT_LIST_HEAD(&mtask->running);
+ }
+
++ session->wq = create_singlethread_workqueue("");
++ BUG_ON(!session->wq);
++
+ if (scsi_add_host(shost, NULL))
+ goto add_host_fail;
+
+@@ -1462,6 +1465,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+
+ kfree(session->targetname);
+
++ destroy_workqueue(session->wq);
++
+ iscsi_destroy_session(cls_session);
+ scsi_host_put(shost);
+ module_put(owner);
+@@ -1595,7 +1600,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- scsi_flush_work(session->host);
++ flush_workqueue(session->wq);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..e8a95f5 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -244,6 +244,8 @@ struct iscsi_session {
+ int mgmtpool_max; /* size of mgmt array */
+ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
+ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
++
++ struct workqueue_struct *wq;
+ };
+
+ /*
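This patch swaps scsi_queue_work()/scsi_flush_work(), which the 2.6.9 midlayer does not provide, for a single-threaded workqueue owned by the session. Stripped of the iscsi specifics, the pattern is roughly the following (names invented; shown with the two-argument INIT_WORK of newer kernels, the 2.6.9 variant also takes a data pointer):

    #include <linux/errno.h>
    #include <linux/workqueue.h>

    struct my_session {
        struct workqueue_struct *wq;
        struct work_struct      xmitwork;
    };

    static void my_xmitwork_fn(struct work_struct *work)
    {
        /* the per-connection transmit path would run here */
    }

    static int my_session_init(struct my_session *s)
    {
        s->wq = create_singlethread_workqueue("my_iscsi_wq");
        if (!s->wq)
            return -ENOMEM;
        INIT_WORK(&s->xmitwork, my_xmitwork_fn);
        /* I/O paths then do: queue_work(s->wq, &s->xmitwork); */
        return 0;
    }

    static void my_session_teardown(struct my_session *s)
    {
        flush_workqueue(s->wq);      /* let queued xmit work drain */
        destroy_workqueue(s->wq);
    }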
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_04_inet_sock_to_opt.patch b/kernel_patches/backport/2.6.9_U6/iscsi_04_inet_sock_to_opt.patch
new file mode 100644
index 0000000..1fb2376
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_04_inet_sock_to_opt.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 905efc4..f73a743 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2027,7 +2027,7 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct inet_sock *inet;
++ struct inet_opt *inet;
+ struct ipv6_pinfo *np;
+ struct sock *sk;
+ int len;
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_05_release_host_lock_before_eh.patch b/kernel_patches/backport/2.6.9_U6/iscsi_05_release_host_lock_before_eh.patch
new file mode 100644
index 0000000..c994506
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_05_release_host_lock_before_eh.patch
@@ -0,0 +1,60 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 7db081b..211944e 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -968,12 +968,14 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn = session->leadconn;
+ int fail_session = 0;
+
++ spin_unlock_irq(host->host_lock);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+ return FAILED;
+ }
+
+@@ -1005,6 +1007,7 @@ failed:
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+
+ return SUCCESS;
+ }
+@@ -1162,13 +1165,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn;
+ struct iscsi_session *session;
+ int rc;
++ struct Scsi_Host *shost = sc->device->host;
+
++ spin_unlock_irq(shost->host_lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+ }
+
+@@ -1253,6 +1259,7 @@ success_cleanup:
+
+ success_rel_mutex:
+ mutex_unlock(&conn->xmitmutex);
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+
+ failed:
+@@ -1260,6 +1267,7 @@ failed:
+ mutex_unlock(&conn->xmitmutex);
+
+ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ spin_lock_irq(shost->host_lock);
+ return FAILED;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_abort);
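The unlock/lock pairs above exist because the 2.6.9 SCSI error handler still enters eh_abort_handler()/eh_host_reset_handler() with the host lock held and interrupts disabled, while the iscsi recovery code sleeps. Reduced to a stub, the shape is (handler and helper names are placeholders):

    #include <linux/spinlock.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static int my_blocking_abort(struct scsi_cmnd *sc)
    {
        return 0;       /* placeholder: would queue the TMF and sleep on it */
    }

    static int my_eh_abort(struct scsi_cmnd *sc)
    {
        struct Scsi_Host *shost = sc->device->host;
        int err;

        spin_unlock_irq(shost->host_lock);      /* older EH enters with it held */
        err = my_blocking_abort(sc);            /* may sleep */
        spin_lock_irq(shost->host_lock);        /* give it back to the midlayer */

        return err ? FAILED : SUCCESS;
    }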
diff --git a/kernel_patches/backport/2.6.9_U6/iscsi_06_scsi_addons.patch b/kernel_patches/backport/2.6.9_U6/iscsi_06_scsi_addons.patch
new file mode 100644
index 0000000..a114696
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iscsi_06_scsi_addons.patch
@@ -0,0 +1,75 @@
+diff --git a/drivers/scsi/init.c b/drivers/scsi/init.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/init.c
+@@ -0,0 +1 @@
++#include "src/init.c"
+diff --git a/drivers/scsi/attribute_container.c b/drivers/scsi/attribute_container.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/attribute_container.c
+@@ -0,0 +1 @@
++#include "../drivers/base/attribute_container.c"
+diff --git a/drivers/scsi/transport_class.c b/drivers/scsi/transport_class.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/transport_class.c
+@@ -0,0 +1 @@
++#include "../drivers/base/transport_class.c"
+diff --git a/drivers/scsi/klist.c b/drivers/scsi/klist.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/klist.c
+@@ -0,0 +1 @@
++#include "../../lib/klist.c"
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi.c
+@@ -0,0 +1 @@
++#include "src/scsi.c"
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_lib.c
+@@ -0,0 +1 @@
++#include "src/scsi_lib.c"
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_scan.c
+@@ -0,0 +1 @@
++#include "src/scsi_scan.c"
+diff --git a/drivers/scsi/libiscsi_f.c b/drivers/scsi/libiscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/libiscsi_f.c
+@@ -0,0 +1 @@
++#include "libiscsi.c"
+diff --git a/drivers/scsi/scsi_transport_iscsi_f.c b/drivers/scsi/scsi_transport_iscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_transport_iscsi_f.c
+@@ -0,0 +1 @@
++#include "scsi_transport_iscsi.c"
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index e212608..3bf2015 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -3,2 +3,7 @@
+ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
+ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
++
++CFLAGS_attribute_container.o = $(BACKPORT_INCLUDES)/src/
++
++scsi_transport_iscsi-y := scsi_transport_iscsi_f.o scsi.o scsi_lib.o init.o klist.o attribute_container.o transport_class.o
++libiscsi-y := libiscsi_f.o scsi_scan.o
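iscsi_06 is build glue rather than a code change: one-line wrappers pull the newer midlayer sources (scsi_scan.c, klist.c, attribute_container.c, transport_class.c, ...) into the iscsi modules, and the Makefile hunk links them in, so the 2.6.9 base kernel does not need to provide them. Since that code then lives in a module instead of the early driver-core init, its setup presumably has to be run from module init, which is what the attribute_container_init() call added by iscsi_02 above does. As a rough sketch (module and function names are hypothetical):

    #include <linux/init.h>
    #include <linux/module.h>

    /*
     * The wrapper files above are one-liners of the form
     *     #include "../drivers/base/attribute_container.c"
     * each compiled as its own object and linked into the iscsi modules
     * by the Makefile hunk. The transport module init then runs the
     * setup the core kernel would otherwise have done at boot:
     */
    extern int attribute_container_init(void);

    static int __init my_transport_init(void)
    {
        attribute_container_init();
        /* class_register() and transport registration would follow */
        return 0;
    }
    module_init(my_transport_init);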
diff --git a/kernel_patches/backport/2.6.9_U6/iser_00_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.9_U6/iser_00_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..ff5d719
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_00_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From 12b757e92112750b4bc90cf8150d20484d684dcf Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 21 Aug 2008 14:28:56 +0300
+Subject: [PATCH] iser_sync_kernel_code_with_2.6.26
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch b/kernel_patches/backport/2.6.9_U6/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
new file mode 100644
index 0000000..101fdc6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
@@ -0,0 +1,44 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..75ecabe 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -211,10 +211,10 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ int error = 0;
+
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(ctask->sc) == 0);
++ BUG_ON(ctask->sc->request_bufflen == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->itt, ctask->sc->request_bufflen,
+ ctask->imm_count, ctask->unsol_count);
+ }
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 5d62cc0..1ae80d8 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -349,12 +349,18 @@ int iser_send_command(struct iscsi_conn *conn,
+ else
+ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+- if (scsi_sg_count(sc)) { /* using a scatter list */
+- data_buf->buf = scsi_sglist(sc);
+- data_buf->size = scsi_sg_count(sc);
++ if (sc->use_sg) { /* using a scatter list */
++ data_buf->buf = sc->request_buffer;
++ data_buf->size = sc->use_sg;
++ } else if (sc->request_bufflen) {
++ /* using a single buffer - convert it into one entry SG */
++ sg_init_one(&data_buf->sg_single,
++ sc->request_buffer, sc->request_bufflen);
++ data_buf->buf = &data_buf->sg_single;
++ data_buf->size = 1;
+ }
+
+- data_buf->data_len = scsi_bufflen(sc);
++ data_buf->data_len = sc->request_bufflen;
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/kernel_patches/backport/2.6.9_U6/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch b/kernel_patches/backport/2.6.9_U6/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
new file mode 100644
index 0000000..7b21cba
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..933429b 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -586,7 +586,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
diff --git a/kernel_patches/backport/2.6.9_U6/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch b/kernel_patches/backport/2.6.9_U6/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
new file mode 100644
index 0000000..d72eb5a
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
@@ -0,0 +1,74 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..7baac99 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -368,8 +368,7 @@ static struct iscsi_transport iscsi_iser_transport;
+ static struct iscsi_cls_session *
+ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct scsi_transport_template *scsit,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+@@ -380,13 +380,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iser_desc *desc;
+
+- /*
+- * we do not support setting can_queue cmd_per_lun from userspace yet
+- * because we preallocate so many resources
+- */
+ cls_session = iscsi_session_setup(iscsit, scsit,
+- ISCSI_DEF_XMIT_CMDS_MAX,
+- ISCSI_MAX_CMD_PER_LUN,
+ sizeof(struct iscsi_iser_cmd_task),
+ sizeof(struct iser_desc),
+ initial_cmdsn, &hn);
+@@ -550,7 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 1ee867b..671faff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -105,7 +105,7 @@
+ #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
++#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+ ISER_MAX_RX_MISC_PDUS + \
+ ISER_MAX_TX_MISC_PDUS)
+
+@@ -117,7 +117,7 @@
+
+ #define ISER_INFLIGHT_DATAOUTS 8
+
+-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
++#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 654a4dc..f3d8ba5 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -154,8 +154,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ /* make the pool size twice the max number of SCSI commands *
+ * the ML is expected to queue, watermark for unmap at 50% */
+- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
++ params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
++ params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.cache = 0;
+ params.flush_function = NULL;
+ params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/kernel_patches/backport/2.6.9_U6/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch b/kernel_patches/backport/2.6.9_U6/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
new file mode 100644
index 0000000..26fa09c
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
@@ -0,0 +1,38 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 8f7b859..5f82d6c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -134,9 +134,18 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iser_ctask->command_sent = 0;
+ iser_ctask->iser_conn = iser_conn;
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(sc->request_bufflen == 0);
++
++ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
++ ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->unsol_count);
++ }
++
+ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+@@ -210,14 +219,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(ctask->sc->request_bufflen == 0);
+-
+- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, ctask->sc->request_bufflen,
+- ctask->imm_count, ctask->unsol_count);
+- }
+-
+ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ conn->id, ctask->itt);
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch b/kernel_patches/backport/2.6.9_U6/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
new file mode 100644
index 0000000..417415f
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
@@ -0,0 +1,18 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5f82d6c..3a67d76 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -574,11 +574,8 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
diff --git a/kernel_patches/backport/2.6.9_U6/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch b/kernel_patches/backport/2.6.9_U6/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
new file mode 100644
index 0000000..0b1a4c4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index c5941fa..2f4f125 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -140,8 +140,8 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ iser_ctask->iser_conn = iser_conn;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(sc->request_bufflen == 0);
++ BUG_ON(ctask->total_length == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->itt, ctask->total_length, ctask->imm_count,
+ ctask->unsol_count);
+ }
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch b/kernel_patches/backport/2.6.9_U6/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
new file mode 100644
index 0000000..f207af3
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
@@ -0,0 +1,14 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 2f4f125..940bf98 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,8 +576,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_INITIATOR_NAME,
++ .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
diff --git a/kernel_patches/backport/2.6.9_U6/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch b/kernel_patches/backport/2.6.9_U6/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
new file mode 100644
index 0000000..f9dceb1
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
@@ -0,0 +1,22 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 940bf98..6a35eff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,7 +576,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
+@@ -593,9 +593,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
diff --git a/kernel_patches/backport/2.6.9_U6/iser_09_fix_inclusion_order.patch b/kernel_patches/backport/2.6.9_U6/iser_09_fix_inclusion_order.patch
new file mode 100644
index 0000000..3c2a969
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_09_fix_inclusion_order.patch
@@ -0,0 +1,13 @@
+--- linux-2.6.20-rc7-orig/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:13:43.000000000 +0200
++++ linux-2.6.20-rc7/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:14:31.000000000 +0200
+@@ -70,9 +70,8 @@
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+-#include <scsi/scsi_transport_iscsi.h>
+-
+ #include "iscsi_iser.h"
++#include <scsi/scsi_transport_iscsi.h>
+
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
diff --git a/kernel_patches/backport/2.6.9_U6/iser_10_fix_struct_scsi_host_template.patch b/kernel_patches/backport/2.6.9_U6/iser_10_fix_struct_scsi_host_template.patch
new file mode 100644
index 0000000..5b28ac4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_10_fix_struct_scsi_host_template.patch
@@ -0,0 +1,31 @@
+From 828e0ad429b92cf75781770ceb9ef7086f34fde2 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:31:42 +0300
+Subject: [PATCH] fix_struct_scsi_host_template
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 9bf24c6..de1e783 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -542,13 +542,11 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "iscsi_iser",
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_11_add_fmr_unalign_cnt.patch b/kernel_patches/backport/2.6.9_U6/iser_11_add_fmr_unalign_cnt.patch
new file mode 100644
index 0000000..ef2a2d6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_11_add_fmr_unalign_cnt.patch
@@ -0,0 +1,25 @@
+From 1255c8e5209ce19644e83e353c260f2eddc62cca Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:54:57 +0300
+Subject: [PATCH] add fmr_unalign_cnt to struct iscsi_conn
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/libiscsi.h | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..182421f 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -197,6 +197,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_queue {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_12_remove_hdr_max.patch b/kernel_patches/backport/2.6.9_U6/iser_12_remove_hdr_max.patch
new file mode 100644
index 0000000..c475001
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_12_remove_hdr_max.patch
@@ -0,0 +1,25 @@
+From 97672ef8a29da5e16774d1de9527b2cc29415e36 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:59:16 +0300
+Subject: [PATCH] remove hdr_max
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index de1e783..6451e9d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -394,7 +394,6 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ ctask = session->cmds[i];
+ iser_ctask = ctask->dd_data;
+ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+- ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+
+ for (i = 0; i < session->mgmtpool_max; i++) {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_13_fix_netlink_kernel_create.patch b/kernel_patches/backport/2.6.9_U6/iser_13_fix_netlink_kernel_create.patch
new file mode 100644
index 0000000..d47df44
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_13_fix_netlink_kernel_create.patch
@@ -0,0 +1,26 @@
+From db61fe2c3062d8918e793ddc7e1a8cc3694bf620 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:20:42 +0300
+Subject: [PATCH] fix netlink_kernel_create
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e969ef7..a2f4fb7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1401,7 +1401,7 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ nls = netlink_kernel_create(NULL, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
+ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U6/iser_14_sync_attribute_container.c_from_ofed1.3.patch b/kernel_patches/backport/2.6.9_U6/iser_14_sync_attribute_container.c_from_ofed1.3.patch
new file mode 100644
index 0000000..e926007
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_14_sync_attribute_container.c_from_ofed1.3.patch
@@ -0,0 +1,394 @@
+From bed65721f623039a119b5ff03c6c1fe44a1ccfb3 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:26:20 +0300
+Subject: [PATCH] sync attribute_container.c from ofed1.3
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/base/attribute_container.c | 100 +++++++++++++++++------------------
+ drivers/base/transport_class.c | 21 ++++----
+ 2 files changed, 60 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index f57652d..7370d7c 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -27,21 +27,21 @@
+ struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+- struct device classdev;
++ struct class_device classdev;
+ };
+
+ static void internal_container_klist_get(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- get_device(&ic->classdev);
++ class_device_get(&ic->classdev);
+ }
+
+ static void internal_container_klist_put(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- put_device(&ic->classdev);
++ class_device_put(&ic->classdev);
+ }
+
+
+@@ -53,7 +53,7 @@ static void internal_container_klist_put(struct klist_node *n)
+ * Returns the container associated with this classdev.
+ */
+ struct attribute_container *
+-attribute_container_classdev_to_container(struct device *classdev)
++attribute_container_classdev_to_container(struct class_device *classdev)
+ {
+ struct internal_container *ic =
+ container_of(classdev, struct internal_container, classdev);
+@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct device *classdev)
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+-static LIST_HEAD(attribute_container_list);
++static struct list_head attribute_container_list;
+
+ static DEFINE_MUTEX(attribute_container_mutex);
+
+@@ -110,11 +110,11 @@ attribute_container_unregister(struct attribute_container *cont)
+ EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+ /* private function used as class release */
+-static void attribute_container_release(struct device *classdev)
++static void attribute_container_release(struct class_device *classdev)
+ {
+ struct internal_container *ic
+ = container_of(classdev, struct internal_container, classdev);
+- struct device *dev = classdev->parent;
++ struct device *dev = classdev->dev;
+
+ kfree(ic);
+ put_device(dev);
+@@ -129,12 +129,12 @@ static void attribute_container_release(struct device *classdev)
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container). If no
+ * fn is provided, the code will simply register the class device via
+- * device_add. If a function is provided, it is expected to add
++ * class_device_add. If a function is provided, it is expected to add
+ * the class device at the appropriate time. One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it a later time. To do this, call this routine for
+ * allocation and initialisation and then use
+- * attribute_container_device_trigger() to call device_add() on
++ * attribute_container_device_trigger() to call class_device_add() on
+ * it. Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+@@ -142,7 +142,7 @@ void
+ attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -163,11 +163,11 @@ attribute_container_add_device(struct device *dev,
+ }
+
+ ic->cont = cont;
+- device_initialize(&ic->classdev);
+- ic->classdev.parent = get_device(dev);
++ class_device_initialize(&ic->classdev);
++ ic->classdev.dev = get_device(dev);
+ ic->classdev.class = cont->class;
+- cont->class->dev_release = attribute_container_release;
+- strcpy(ic->classdev.bus_id, dev->bus_id);
++ cont->class->release = attribute_container_release;
++ strcpy(ic->classdev.class_id, dev->bus_id);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+@@ -195,19 +195,20 @@ attribute_container_add_device(struct device *dev,
+ * @fn: A function to call to remove the device
+ *
+ * This routine triggers device removal. If fn is NULL, then it is
+- * simply done via device_unregister (note that if something
++ * simply done via class_device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released). If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+- * device_del() and then use attribute_container_device_trigger()
+- * to do the final put on the classdev.
++ * class_device_del() and then use
++ * attribute_container_device_trigger() to do the final put on the
++ * classdev.
+ */
+ void
+ attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -223,14 +224,14 @@ attribute_container_remove_device(struct device *dev,
+ continue;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev != ic->classdev.parent)
++ if (dev != ic->classdev.dev)
+ continue;
+ klist_del(&ic->node);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else {
+ attribute_container_remove_attrs(&ic->classdev);
+- device_unregister(&ic->classdev);
++ class_device_unregister(&ic->classdev);
+ }
+ }
+ }
+@@ -251,7 +252,7 @@ void
+ attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -269,7 +270,7 @@ attribute_container_device_trigger(struct device *dev,
+ }
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev == ic->classdev.parent)
++ if (dev == ic->classdev.dev)
+ fn(cont, dev, &ic->classdev);
+ }
+ }
+@@ -312,23 +313,18 @@ attribute_container_trigger(struct device *dev,
+ * attributes listed in the container
+ */
+ int
+-attribute_container_add_attrs(struct device *classdev)
++attribute_container_add_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i, error;
+
+- BUG_ON(attrs && cont->grp);
+-
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return 0;
+
+- if (cont->grp)
+- return sysfs_create_group(&classdev->kobj, cont->grp);
+-
+ for (i = 0; attrs[i]; i++) {
+- error = device_create_file(classdev, attrs[i]);
++ error = class_device_create_file(classdev, attrs[i]);
+ if (error)
+ return error;
+ }
+@@ -337,18 +333,18 @@ attribute_container_add_attrs(struct device *classdev)
+ }
+
+ /**
+- * attribute_container_add_class_device - same function as device_add
++ * attribute_container_add_class_device - same function as class_device_add
+ *
+ * @classdev: the class device to add
+ *
+- * This performs essentially the same function as device_add except for
++ * This performs essentially the same function as class_device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+ int
+-attribute_container_add_class_device(struct device *classdev)
++attribute_container_add_class_device(struct class_device *classdev)
+ {
+- int error = device_add(classdev);
++ int error = class_device_add(classdev);
+ if (error)
+ return error;
+ return attribute_container_add_attrs(classdev);
+@@ -363,7 +359,7 @@ attribute_container_add_class_device(struct device *classdev)
+ int
+ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ return attribute_container_add_class_device(classdev);
+ }
+@@ -375,23 +371,18 @@ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ *
+ */
+ void
+-attribute_container_remove_attrs(struct device *classdev)
++attribute_container_remove_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i;
+
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return;
+
+- if (cont->grp) {
+- sysfs_remove_group(&classdev->kobj, cont->grp);
+- return ;
+- }
+-
+ for (i = 0; attrs[i]; i++)
+- device_remove_file(classdev, attrs[i]);
++ class_device_remove_file(classdev, attrs[i]);
+ }
+
+ /**
+@@ -400,13 +391,13 @@ attribute_container_remove_attrs(struct device *classdev)
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+- * device_del.
++ * class_device_del.
+ */
+ void
+-attribute_container_class_device_del(struct device *classdev)
++attribute_container_class_device_del(struct class_device *classdev)
+ {
+ attribute_container_remove_attrs(classdev);
+- device_del(classdev);
++ class_device_del(classdev);
+ }
+
+ /**
+@@ -418,16 +409,16 @@ attribute_container_class_device_del(struct device *classdev)
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+-struct device *
++struct class_device *
+ attribute_container_find_class_device(struct attribute_container *cont,
+ struct device *dev)
+ {
+- struct device *cdev = NULL;
++ struct class_device *cdev = NULL;
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (ic->classdev.parent == dev) {
++ if (ic->classdev.dev == dev) {
+ cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
+@@ -438,3 +429,10 @@ attribute_container_find_class_device(struct attribute_container *cont,
+ return cdev;
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
++
++int __init
++attribute_container_init(void)
++{
++ INIT_LIST_HEAD(&attribute_container_list);
++ return 0;
++}
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
+index 84997ef..f25e7c6 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+ static int anon_transport_dummy_function(struct transport_container *tc,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ /* do nothing */
+ return 0;
+@@ -108,14 +108,13 @@ EXPORT_SYMBOL_GPL(anon_transport_class_register);
+ */
+ void anon_transport_class_unregister(struct anon_transport_class *atc)
+ {
+- if (unlikely(attribute_container_unregister(&atc->container)))
+- BUG();
++ attribute_container_unregister(&atc->container);
+ }
+ EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+ static int transport_setup_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -127,7 +126,9 @@ static int transport_setup_classdev(struct attribute_container *cont,
+ }
+
+ /**
+- * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
++ * transport_setup_device - declare a new dev for transport class association
++ * but don't make it visible yet.
++ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+@@ -149,7 +150,7 @@ EXPORT_SYMBOL_GPL(transport_setup_device);
+
+ static int transport_add_class_device(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ int error = attribute_container_add_class_device(classdev);
+ struct transport_container *tcont =
+@@ -181,7 +182,7 @@ EXPORT_SYMBOL_GPL(transport_add_device);
+
+ static int transport_configure(struct attribute_container *cont,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(transport_configure_device);
+
+ static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+@@ -251,12 +252,12 @@ EXPORT_SYMBOL_GPL(transport_remove_device);
+
+ static void transport_destroy_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove != anon_transport_dummy_function)
+- put_device(classdev);
++ class_device_put(classdev);
+ }
+
+
+--
+1.5.3.8
+
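The hunks above retarget the mainline attribute_container/transport_class code from the struct device based class API back to the struct class_device API still used on the kernels this backport covers: every callback gains a struct class_device * argument, device_initialize()/device_add()/device_del()/put_device() on the classdev become their class_device_*() counterparts, the parent is reached through classdev->dev instead of classdev->parent, and the container list is now set up at runtime via attribute_container_init(). A minimal sketch of the resulting calling convention, assuming the pre-2.6.18 class_device API that the patch itself relies on; the names my_class, my_setup_classdev and my_add_classdev are hypothetical and not part of the patch:

    #include <linux/device.h>
    #include <linux/attribute_container.h>
    #include <linux/string.h>

    static struct class my_class;   /* assumed to be registered elsewhere */

    /* Callback shape used throughout the backported transport_class code. */
    static int my_setup_classdev(struct attribute_container *cont,
                                 struct device *dev,
                                 struct class_device *classdev)
    {
        /* dev is the parent (e.g. HBA) device, classdev the class device */
        return 0;
    }

    /* Tie a class_device to its parent the way the backport does. */
    static int my_add_classdev(struct class_device *cdev, struct device *dev)
    {
        class_device_initialize(cdev);
        cdev->class = &my_class;
        cdev->dev = get_device(dev);  /* backport drops this ref in its class release hook */
        strlcpy(cdev->class_id, dev->bus_id, BUS_ID_SIZE);
        return class_device_add(cdev);
    }

On mainline the same roles are filled by device_add()/device_del() and classdev->parent; keeping the backported code line-for-line comparable with upstream makes it easier to carry later fixes across.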
diff --git a/kernel_patches/backport/2.6.9_U6/iser_15_fix_iscsi_free_mgmt_task.patch b/kernel_patches/backport/2.6.9_U6/iser_15_fix_iscsi_free_mgmt_task.patch
new file mode 100644
index 0000000..7a3a3ea
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U6/iser_15_fix_iscsi_free_mgmt_task.patch
@@ -0,0 +1,28 @@
+From 5a9fd2300982aca58f1306bdb98cab878998a607 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:53:59 +0300
+Subject: [PATCH] fix iscsi_free_mgmt_task
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iser_initiator.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 4e20c8b..e7f2399 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -627,7 +627,9 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&conn->session->lock);
+- iscsi_free_mgmt_task(conn, mtask);
++ list_del(&mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ spin_unlock(&session->lock);
+ }
+ }
+--
+1.5.3.8
+
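The replacement above swaps the iscsi_free_mgmt_task() call, which the libiscsi level targeted by this backport does not appear to provide, for an open-coded release: while the session lock is held, the management task is unlinked from its running list and its pointer is pushed back onto the session's mgmt-task kfifo. A minimal sketch of that pattern as a standalone helper; the helper name is hypothetical, and the body mirrors the two lines the patch adds:

    #include <linux/list.h>
    #include <linux/kfifo.h>
    #include <scsi/libiscsi.h>

    /* Caller must hold session->lock. */
    static void __iser_put_mgmt_task(struct iscsi_session *session,
                                     struct iscsi_mgmt_task *mtask)
    {
        /* drop the task from the connection's running list ... */
        list_del(&mtask->running);
        /* ... and return it to the session's free mgmt-task pool */
        __kfifo_put(session->mgmtpool.queue, (void *)&mtask,
                    sizeof(void *));
    }

The intent matches the upstream helper: unlink the task, then hand it back to the pool.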
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch b/kernel_patches/backport/2.6.9_U7/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
new file mode 100644
index 0000000..e35b289
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_01_sync_kernel_code_with_ofed_1_2_5.patch
@@ -0,0 +1,9402 @@
+From f75042cdafb7f42cd1f9a244872ae2f7896e3278 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Wed, 20 Aug 2008 14:32:54 +0300
+Subject: [PATCH 1/1] iscsi_01_sync_kernel_code_with_ofed_1_2_5
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/iscsi_tcp.c | 2537 +++++++++++++++++++----------------
+ drivers/scsi/iscsi_tcp.h | 136 ++-
+ drivers/scsi/libiscsi.c | 2501 ++++++++++++----------------------
+ drivers/scsi/scsi_transport_iscsi.c | 1208 +++++------------
+ include/scsi/iscsi_if.h | 119 +--
+ include/scsi/iscsi_proto.h | 23 +-
+ include/scsi/libiscsi.h | 247 ++---
+ include/scsi/scsi_transport_iscsi.h | 148 +--
+ 8 files changed, 2862 insertions(+), 4057 deletions(-)
+
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 2a2f009..c9a3abf 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -29,15 +29,14 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/inet.h>
+-#include <linux/file.h>
+ #include <linux/blkdev.h>
+ #include <linux/crypto.h>
+ #include <linux/delay.h>
+ #include <linux/kfifo.h>
+ #include <linux/scatterlist.h>
++#include <linux/mutex.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+-#include <scsi/scsi_device.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_transport_iscsi.h>
+@@ -48,7 +47,7 @@ MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus at yahoo.com>, "
+ "Alex Aizman <itn780 at yahoo.com>");
+ MODULE_DESCRIPTION("iSCSI/TCP data-path");
+ MODULE_LICENSE("GPL");
+-#undef DEBUG_TCP
++/* #define DEBUG_TCP */
+ #define DEBUG_ASSERT
+
+ #ifdef DEBUG_TCP
+@@ -64,515 +63,200 @@ MODULE_LICENSE("GPL");
+ #define BUG_ON(expr)
+ #endif
+
+-static struct scsi_transport_template *iscsi_tcp_scsi_transport;
+-static struct scsi_host_template iscsi_sht;
+-static struct iscsi_transport iscsi_tcp_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+-static int iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment);
+-
+-/*
+- * Scatterlist handling: inside the iscsi_segment, we
+- * remember an index into the scatterlist, and set data/size
+- * to the current scatterlist entry. For highmem pages, we
+- * kmap as needed.
+- *
+- * Note that the page is unmapped when we return from
+- * TCP's data_ready handler, so we may end up mapping and
+- * unmapping the same page repeatedly. The whole reason
+- * for this is that we shouldn't keep the page mapped
+- * outside the softirq.
+- */
+-
+-/**
+- * iscsi_tcp_segment_init_sg - init indicated scatterlist entry
+- * @segment: the buffer object
+- * @sg: scatterlist
+- * @offset: byte offset into that sg entry
+- *
+- * This function sets up the segment so that subsequent
+- * data is copied to the indicated sg entry, at the given
+- * offset.
+- */
+ static inline void
+-iscsi_tcp_segment_init_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg, unsigned int offset)
++iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
+ {
+- segment->sg = sg;
+- segment->sg_offset = offset;
+- segment->size = min(sg->length - offset,
+- segment->total_size - segment->total_copied);
+- segment->data = NULL;
++ ibuf->sg.page = virt_to_page(vbuf);
++ ibuf->sg.offset = offset_in_page(vbuf);
++ ibuf->sg.length = size;
++ ibuf->sent = 0;
++ ibuf->use_sendmsg = 1;
+ }
+
+-/**
+- * iscsi_tcp_segment_map - map the current S/G page
+- * @segment: iscsi_segment
+- * @recv: 1 if called from recv path
+- *
+- * We only need to possibly kmap data if scatter lists are being used,
+- * because the iscsi passthrough and internal IO paths will never use high
+- * mem pages.
+- */
+ static inline void
+-iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
++iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
+ {
+- struct scatterlist *sg;
+-
+- if (segment->data != NULL || !segment->sg)
+- return;
+-
+- sg = segment->sg;
+- BUG_ON(segment->sg_mapped);
+- BUG_ON(sg->length == 0);
+-
++ ibuf->sg.page = sg->page;
++ ibuf->sg.offset = sg->offset;
++ ibuf->sg.length = sg->length;
+ /*
+- * If the page count is greater than one it is ok to send
+- * to the network layer's zero copy send path. If not we
+- * have to go the slow sendmsg path. We always map for the
+- * recv path.
++ * Fastpath: sg element fits into single page
+ */
+- if (page_count(sg_page(sg)) >= 1 && !recv)
+- return;
+-
+- debug_tcp("iscsi_tcp_segment_map %s %p\n", recv ? "recv" : "xmit",
+- segment);
+- segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+- segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
++ if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
++ ibuf->use_sendmsg = 0;
++ else
++ ibuf->use_sendmsg = 1;
++ ibuf->sent = 0;
+ }
+
+-static inline void
+-iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
++static inline int
++iscsi_buf_left(struct iscsi_buf *ibuf)
+ {
+- debug_tcp("iscsi_tcp_segment_unmap %p\n", segment);
++ int rc;
+
+- if (segment->sg_mapped) {
+- debug_tcp("iscsi_tcp_segment_unmap valid\n");
+- kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+- segment->sg_mapped = NULL;
+- segment->data = NULL;
+- }
++ rc = ibuf->sg.length - ibuf->sent;
++ BUG_ON(rc < 0);
++ return rc;
+ }
+
+-/*
+- * Splice the digest buffer into the buffer
+- */
+ static inline void
+-iscsi_tcp_segment_splice_digest(struct iscsi_segment *segment, void *digest)
++iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ u8* crc)
+ {
+- segment->data = digest;
+- segment->digest_len = ISCSI_DIGEST_SIZE;
+- segment->total_size += ISCSI_DIGEST_SIZE;
+- segment->size = ISCSI_DIGEST_SIZE;
+- segment->copied = 0;
+- segment->sg = NULL;
+- segment->hash = NULL;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++
++ crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
++ buf->sg.length = tcp_conn->hdr_size;
+ }
+
+-/**
+- * iscsi_tcp_segment_done - check whether the segment is complete
+- * @segment: iscsi segment to check
+- * @recv: set to one of this is called from the recv path
+- * @copied: number of bytes copied
+- *
+- * Check if we're done receiving this segment. If the receive
+- * buffer is full but we expect more data, move on to the
+- * next entry in the scatterlist.
+- *
+- * If the amount of data we received isn't a multiple of 4,
+- * we will transparently receive the pad bytes, too.
+- *
+- * This function must be re-entrant.
+- */
+ static inline int
+-iscsi_tcp_segment_done(struct iscsi_segment *segment, int recv, unsigned copied)
++iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
+ {
+- static unsigned char padbuf[ISCSI_PAD_LEN];
+- struct scatterlist sg;
+- unsigned int pad;
++ struct sk_buff *skb = tcp_conn->in.skb;
++
++ tcp_conn->in.zero_copy_hdr = 0;
+
+- debug_tcp("copied %u %u size %u %s\n", segment->copied, copied,
+- segment->size, recv ? "recv" : "xmit");
+- if (segment->hash && copied) {
++ if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
++ tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
+ /*
+- * If a segment is kmapd we must unmap it before sending
+- * to the crypto layer since that will try to kmap it again.
++ * Zero-copy PDU Header: using connection context
++ * to store header pointer.
+ */
+- iscsi_tcp_segment_unmap(segment);
+-
+- if (!segment->data) {
+- sg_init_table(&sg, 1);
+- sg_set_page(&sg, sg_page(segment->sg), copied,
+- segment->copied + segment->sg_offset +
+- segment->sg->offset);
+- } else
+- sg_init_one(&sg, segment->data + segment->copied,
+- copied);
+- crypto_hash_update(segment->hash, &sg, copied);
+- }
+-
+- segment->copied += copied;
+- if (segment->copied < segment->size) {
+- iscsi_tcp_segment_map(segment, recv);
+- return 0;
+- }
+-
+- segment->total_copied += segment->copied;
+- segment->copied = 0;
+- segment->size = 0;
+-
+- /* Unmap the current scatterlist page, if there is one. */
+- iscsi_tcp_segment_unmap(segment);
+-
+- /* Do we have more scatterlist entries? */
+- debug_tcp("total copied %u total size %u\n", segment->total_copied,
+- segment->total_size);
+- if (segment->total_copied < segment->total_size) {
+- /* Proceed to the next entry in the scatterlist. */
+- iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
+- 0);
+- iscsi_tcp_segment_map(segment, recv);
+- BUG_ON(segment->size == 0);
+- return 0;
+- }
+-
+- /* Do we need to handle padding? */
+- pad = iscsi_padding(segment->total_copied);
+- if (pad != 0) {
+- debug_tcp("consume %d pad bytes\n", pad);
+- segment->total_size += pad;
+- segment->size = pad;
+- segment->data = padbuf;
+- return 0;
+- }
+-
+- /*
+- * Set us up for transferring the data digest. hdr digest
+- * is completely handled in hdr done function.
+- */
+- if (segment->hash) {
+- crypto_hash_final(segment->hash, segment->digest);
+- iscsi_tcp_segment_splice_digest(segment,
+- recv ? segment->recv_digest : segment->digest);
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-/**
+- * iscsi_tcp_xmit_segment - transmit segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to transmnit
+- *
+- * This function transmits as much of the buffer as
+- * the network layer will accept, and returns the number of
+- * bytes transmitted.
+- *
+- * If CRC hashing is enabled, the function will compute the
+- * hash as it goes. When the entire segment has been transmitted,
+- * it will retrieve the hash value and send it as well.
+- */
+-static int
+-iscsi_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct socket *sk = tcp_conn->sock;
+- unsigned int copied = 0;
+- int r = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 0, r)) {
+- struct scatterlist *sg;
+- unsigned int offset, copy;
+- int flags = 0;
+-
+- r = 0;
+- offset = segment->copied;
+- copy = segment->size - offset;
+-
+- if (segment->total_copied + segment->size < segment->total_size)
+- flags |= MSG_MORE;
+-
+- /* Use sendpage if we can; else fall back to sendmsg */
+- if (!segment->data) {
+- sg = segment->sg;
+- offset += segment->sg_offset + sg->offset;
+- r = tcp_conn->sendpage(sk, sg_page(sg), offset, copy,
+- flags);
++ if (skb_shinfo(skb)->frag_list == NULL &&
++ !skb_shinfo(skb)->nr_frags) {
++ tcp_conn->in.hdr = (struct iscsi_hdr *)
++ ((char*)skb->data + tcp_conn->in.offset);
++ tcp_conn->in.zero_copy_hdr = 1;
+ } else {
+- struct msghdr msg = { .msg_flags = flags };
+- struct kvec iov = {
+- .iov_base = segment->data + offset,
+- .iov_len = copy
+- };
+-
+- r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
++ /* ignoring return code since we checked
++ * in.copy before */
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ &tcp_conn->hdr, tcp_conn->hdr_size);
++ tcp_conn->in.hdr = &tcp_conn->hdr;
+ }
++ tcp_conn->in.offset += tcp_conn->hdr_size;
++ tcp_conn->in.copy -= tcp_conn->hdr_size;
++ } else {
++ int hdr_remains;
++ int copylen;
+
+- if (r < 0) {
+- iscsi_tcp_segment_unmap(segment);
+- if (copied || r == -EAGAIN)
+- break;
+- return r;
+- }
+- copied += r;
+- }
+- return copied;
+-}
+-
+-/**
+- * iscsi_tcp_segment_recv - copy data to segment
+- * @tcp_conn: the iSCSI TCP connection
+- * @segment: the buffer to copy to
+- * @ptr: data pointer
+- * @len: amount of data available
+- *
+- * This function copies up to @len bytes to the
+- * given buffer, and returns the number of bytes
+- * consumed, which can actually be less than @len.
+- *
+- * If hash digest is enabled, the function will update the
+- * hash while copying.
+- * Combining these two operations doesn't buy us a lot (yet),
+- * but in the future we could implement combined copy+crc,
+- * just way we do for network layer checksums.
+- */
+-static int
+-iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment, const void *ptr,
+- unsigned int len)
+-{
+- unsigned int copy = 0, copied = 0;
+-
+- while (!iscsi_tcp_segment_done(segment, 1, copy)) {
+- if (copied == len) {
+- debug_tcp("iscsi_tcp_segment_recv copied %d bytes\n",
+- len);
+- break;
+- }
+-
+- copy = min(len - copied, segment->size - segment->copied);
+- debug_tcp("iscsi_tcp_segment_recv copying %d\n", copy);
+- memcpy(segment->data + segment->copied, ptr + copied, copy);
+- copied += copy;
+- }
+- return copied;
+-}
+-
+-static inline void
+-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
+- unsigned char digest[ISCSI_DIGEST_SIZE])
+-{
+- struct scatterlist sg;
+-
+- sg_init_one(&sg, hdr, hdrlen);
+- crypto_hash_digest(hash, &sg, hdrlen, digest);
+-}
+-
+-static inline int
+-iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- if (!segment->digest_len)
+- return 1;
+-
+- if (memcmp(segment->recv_digest, segment->digest,
+- segment->digest_len)) {
+- debug_scsi("digest mismatch\n");
+- return 0;
+- }
++ /*
++ * PDU header scattered across SKB's,
++ * copying it... This'll happen quite rarely.
++ */
+
+- return 1;
+-}
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
++ tcp_conn->in.hdr_offset = 0;
+
+-/*
+- * Helper function to set up segment buffer
+- */
+-static inline void
+-__iscsi_segment_init(struct iscsi_segment *segment, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- memset(segment, 0, sizeof(*segment));
+- segment->total_size = size;
+- segment->done = done;
++ hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
++ BUG_ON(hdr_remains <= 0);
+
+- if (hash) {
+- segment->hash = hash;
+- crypto_hash_init(hash);
+- }
+-}
++ copylen = min(tcp_conn->in.copy, hdr_remains);
++ skb_copy_bits(skb, tcp_conn->in.offset,
++ (char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
++ copylen);
+
+-static inline void
+-iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
+- size_t size, iscsi_segment_done_fn_t *done,
+- struct hash_desc *hash)
+-{
+- __iscsi_segment_init(segment, size, done, hash);
+- segment->data = data;
+- segment->size = size;
+-}
++ debug_tcp("PDU gather offset %d bytes %d in.offset %d "
++ "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
++ tcp_conn->in.offset, tcp_conn->in.copy);
+
+-static inline int
+-iscsi_segment_seek_sg(struct iscsi_segment *segment,
+- struct scatterlist *sg_list, unsigned int sg_count,
+- unsigned int offset, size_t size,
+- iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+-{
+- struct scatterlist *sg;
+- unsigned int i;
+-
+- debug_scsi("iscsi_segment_seek_sg offset %u size %llu\n",
+- offset, size);
+- __iscsi_segment_init(segment, size, done, hash);
+- for_each_sg(sg_list, sg, sg_count, i) {
+- debug_scsi("sg %d, len %u offset %u\n", i, sg->length,
+- sg->offset);
+- if (offset < sg->length) {
+- iscsi_tcp_segment_init_sg(segment, sg, offset);
+- return 0;
++ tcp_conn->in.offset += copylen;
++ tcp_conn->in.copy -= copylen;
++ if (copylen < hdr_remains) {
++ tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
++ tcp_conn->in.hdr_offset += copylen;
++ return -EAGAIN;
+ }
+- offset -= sg->length;
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->discontiguous_hdr_cnt++;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+
+- return ISCSI_ERR_DATA_OFFSET;
+-}
+-
+-/**
+- * iscsi_tcp_hdr_recv_prep - prep segment for hdr reception
+- * @tcp_conn: iscsi connection to prep for
+- *
+- * This function always passes NULL for the hash argument, because when this
+- * function is called we do not yet know the final size of the header and want
+- * to delay the digest processing until we know that.
+- */
+-static void
+-iscsi_tcp_hdr_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- debug_tcp("iscsi_tcp_hdr_recv_prep(%p%s)\n", tcp_conn,
+- tcp_conn->iscsi_conn->hdrdgst_en ? ", digest enabled" : "");
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- tcp_conn->in.hdr_buf, sizeof(struct iscsi_hdr),
+- iscsi_tcp_hdr_recv_done, NULL);
+-}
+-
+-/*
+- * Handle incoming reply to any other type of command
+- */
+-static int
+-iscsi_tcp_data_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- int rc = 0;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr,
+- conn->data, tcp_conn->in.datalen);
+- if (rc)
+- return rc;
+-
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-static void
+-iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct hash_desc *rx_hash = NULL;
+-
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->in.segment,
+- conn->data, tcp_conn->in.datalen,
+- iscsi_tcp_data_recv_done, rx_hash);
+-}
+-
+ /*
+ * must be called with session lock
+ */
+ static void
+-iscsi_tcp_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
++ struct scsi_cmnd *sc;
+
+- /* nothing to do for mgmt tasks */
+- if (!task->sc)
+- return;
+-
+- /* flush task's r2t queues */
+- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
++ /* flush ctask's r2t queues */
++ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+- debug_scsi("iscsi_tcp_cleanup_task pending r2t dropped\n");
++ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
+- tcp_task->r2t = NULL;
+- }
++ sc = ctask->sc;
++ if (unlikely(!sc))
++ return;
++
++ tcp_ctask->xmstate = XMSTATE_IDLE;
++ tcp_ctask->r2t = NULL;
+ }
+
+ /**
+ * iscsi_data_rsp - SCSI Data-In Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
++ int rc;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+ int datasn = be32_to_cpu(rhdr->datasn);
+- unsigned total_in_length = scsi_in(sc)->length;
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
++ /*
++ * setup Data-In byte counter (gets decremented..)
++ */
++ ctask->data_count = tcp_conn->in.datalen;
++
+ if (tcp_conn->in.datalen == 0)
+ return 0;
+
+- if (tcp_task->exp_datasn != datasn) {
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->datasn(%d)\n",
+- __func__, tcp_task->exp_datasn, datasn);
++ if (ctask->datasn != datasn)
+ return ISCSI_ERR_DATASN;
+- }
+
+- tcp_task->exp_datasn++;
++ ctask->datasn++;
+
+- tcp_task->data_offset = be32_to_cpu(rhdr->offset);
+- if (tcp_task->data_offset + tcp_conn->in.datalen > total_in_length) {
+- debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
+- __func__, tcp_task->data_offset,
+- tcp_conn->in.datalen, total_in_length);
++ tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
++ if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
+ return ISCSI_ERR_DATA_OFFSET;
+- }
+
+ if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ struct scsi_cmnd *sc = ctask->sc;
++
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+- if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
+- ISCSI_FLAG_DATA_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+ if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= total_in_length))
+- scsi_in(sc)->resid = res_count;
+- else
++ res_count <= sc->request_bufflen) {
++ sc->resid = res_count;
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
+ sc->result = (DID_BAD_TARGET << 16) |
+ rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
++ } else
++ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+ }
+
+ conn->datain_pdus_cnt++;
+@@ -582,7 +266,7 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ /**
+ * iscsi_solicit_data_init - initialize first Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ *
+ * Notes:
+@@ -592,10 +276,11 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ * This function is called with connection lock taken.
+ **/
+ static void
+-iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
++iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
+ struct iscsi_r2t_info *r2t)
+ {
+ struct iscsi_data *hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -603,8 +288,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ hdr->offset = cpu_to_be32(r2t->data_offset);
+ if (r2t->data_length > conn->max_xmit_dlength) {
+@@ -619,57 +304,94 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_task *task,
+ conn->dataout_pdus_cnt++;
+
+ r2t->sent = 0;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (sc->use_sg) {
++ int i, sg_count = 0;
++ struct scatterlist *sg = sc->request_buffer;
++
++ r2t->sg = NULL;
++ for (i = 0; i < sc->use_sg; i++, sg += 1) {
++ /* FIXME: prefetch ? */
++ if (sg_count + sg->length > r2t->data_offset) {
++ int page_offset;
++
++ /* sg page found! */
++
++ /* offset within this page */
++ page_offset = r2t->data_offset - sg_count;
++
++ /* fill in this buffer */
++ iscsi_buf_init_sg(&r2t->sendbuf, sg);
++ r2t->sendbuf.sg.offset += page_offset;
++ r2t->sendbuf.sg.length -= page_offset;
++
++ /* xmit logic will continue with next one */
++ r2t->sg = sg + 1;
++ break;
++ }
++ sg_count += sg->length;
++ }
++ BUG_ON(r2t->sg == NULL);
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + r2t->data_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
+ }
+
+ /**
+ * iscsi_r2t_rsp - iSCSI R2T Response processing
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ **/
+ static int
+-iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_r2t_info *r2t;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+ int r2tsn = be32_to_cpu(rhdr->r2tsn);
+ int rc;
+
+ if (tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2t with datalen %d\n",
+- tcp_conn->in.datalen);
++ printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
++ tcp_conn->in.datalen);
+ return ISCSI_ERR_DATALEN;
+ }
+
+- if (tcp_task->exp_datasn != r2tsn){
+- debug_tcp("%s: task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
+- __func__, tcp_task->exp_datasn, r2tsn);
++ if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
+ return ISCSI_ERR_R2TSN;
+- }
+
+- /* fill-in new R2T associated with the task */
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc)
++ return rc;
+
+- if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+- iscsi_conn_printk(KERN_INFO, conn,
+- "dropping R2T itt %d in recovery.\n",
+- task->itt);
++ /* FIXME: use R2TSN to detect missing R2T */
++
++ /* fill-in new R2T associated with the task */
++ spin_lock(&session->lock);
++ if (!ctask->sc || ctask->mtask ||
++ session->state != ISCSI_STATE_LOGGED_IN) {
++ printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
++ "recovery...\n", ctask->itt);
++ spin_unlock(&session->lock);
+ return 0;
+ }
+
+- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
++ rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ BUG_ON(!rc);
+
+ r2t->exp_statsn = rhdr->statsn;
+ r2t->data_length = be32_to_cpu(rhdr->data_length);
+ if (r2t->data_length == 0) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with zero data len\n");
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
++ spin_unlock(&session->lock);
+ return ISCSI_ERR_DATALEN;
+ }
+
+@@ -679,159 +401,122 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+ r2t->data_length, session->max_burst);
+
+ r2t->data_offset = be32_to_cpu(rhdr->data_offset);
+- if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid R2T with data len %u at offset %u "
+- "and total length %d\n", r2t->data_length,
+- r2t->data_offset, scsi_out(task->sc)->length);
+- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+- sizeof(void*));
++ if (r2t->data_offset + r2t->data_length > ctask->total_length) {
++ spin_unlock(&session->lock);
++ printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
++ "offset %u and total length %d\n", r2t->data_length,
++ r2t->data_offset, ctask->total_length);
+ return ISCSI_ERR_DATALEN;
+ }
+
+ r2t->ttt = rhdr->ttt; /* no flip */
+ r2t->solicit_datasn = 0;
+
+- iscsi_solicit_data_init(conn, task, r2t);
++ iscsi_solicit_data_init(conn, ctask, r2t);
+
+- tcp_task->exp_datasn = r2tsn + 1;
+- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+- conn->r2t_pdus_cnt++;
++ tcp_ctask->exp_r2tsn = r2tsn + 1;
++ __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ list_move_tail(&ctask->running, &conn->xmitqueue);
+
+- iscsi_requeue_task(task);
+- return 0;
+-}
+-
+-/*
+- * Handle incoming reply to DataIn command
+- */
+-static int
+-iscsi_tcp_process_data_in(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr = tcp_conn->in.hdr;
+- int rc;
+-
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_DATA_DGST;
+-
+- /* check for non-exceptional status */
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
+- if (rc)
+- return rc;
+- }
++ scsi_queue_work(session->host, &conn->xmitwork);
++ conn->r2t_pdus_cnt++;
++ spin_unlock(&session->lock);
+
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
+ return 0;
+ }
+
+-/**
+- * iscsi_tcp_hdr_dissect - process PDU header
+- * @conn: iSCSI connection
+- * @hdr: PDU header
+- *
+- * This function analyzes the header of the PDU received,
+- * and performs several sanity checks. If the PDU is accompanied
+- * by data, the receive buffer is set up to copy the incoming data
+- * to the correct location.
+- */
+ static int
+-iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
++iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
+ {
+ int rc = 0, opcode, ahslen;
++ struct iscsi_hdr *hdr;
++ struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_task *task;
++ uint32_t cdgst, rdgst = 0, itt;
++
++ hdr = tcp_conn->in.hdr;
+
+ /* verify PDU length */
+ tcp_conn->in.datalen = ntoh24(hdr->dlength);
+ if (tcp_conn->in.datalen > conn->max_recv_dlength) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: datalen %d > %d\n",
+- tcp_conn->in.datalen, conn->max_recv_dlength);
++ printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
++ tcp_conn->in.datalen, conn->max_recv_dlength);
+ return ISCSI_ERR_DATALEN;
+ }
++ tcp_conn->data_copied = 0;
+
+- /* Additional header segments. So far, we don't
+- * process additional headers.
+- */
++ /* read AHS */
+ ahslen = hdr->hlength << 2;
++ tcp_conn->in.offset += ahslen;
++ tcp_conn->in.copy -= ahslen;
++ if (tcp_conn->in.copy < 0) {
++ printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
++ "%d bytes\n", ahslen);
++ return ISCSI_ERR_AHSLEN;
++ }
++
++ /* calculate read padding */
++ tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
++ if (tcp_conn->in.padding) {
++ tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
++ debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
++ }
++
++ if (conn->hdrdgst_en) {
++ struct scatterlist sg;
++
++ sg_init_one(&sg, (u8 *)hdr,
++ sizeof(struct iscsi_hdr) + ahslen);
++ crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
++ (u8 *)&cdgst);
++ rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
++ ahslen);
++ if (cdgst != rdgst) {
++ printk(KERN_ERR "iscsi_tcp: hdrdgst error "
++ "recv 0x%x calc 0x%x\n", rdgst, cdgst);
++ return ISCSI_ERR_HDR_DGST;
++ }
++ }
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+ /* verify itt (itt encoding: age+cid+itt) */
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
++ rc = iscsi_verify_itt(conn, hdr, &itt);
++ if (rc == ISCSI_ERR_NO_SCSI_CMD) {
++ tcp_conn->in.datalen = 0; /* force drop */
++ return 0;
++ } else if (rc)
+ return rc;
+
+- debug_tcp("opcode 0x%x ahslen %d datalen %d\n",
+- opcode, ahslen, tcp_conn->in.datalen);
++ debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
++ opcode, tcp_conn->in.offset, tcp_conn->in.copy,
++ ahslen, tcp_conn->in.datalen);
+
+ switch(opcode) {
+ case ISCSI_OP_SCSI_DATA_IN:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else
+- rc = iscsi_data_rsp(conn, task);
+- if (rc) {
+- spin_unlock(&conn->session->lock);
+- break;
+- }
+-
+- if (tcp_conn->in.datalen) {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct hash_desc *rx_hash = NULL;
+- struct scsi_data_buffer *sdb = scsi_in(task->sc);
+-
+- /*
+- * Setup copy of Data-In into the Scsi_Cmnd
+- * Scatterlist case:
+- * We set up the iscsi_segment to point to the next
+- * scatterlist entry to copy to. As we go along,
+- * we move on to the next scatterlist entry and
+- * update the digest per-entry.
+- */
+- if (conn->datadgst_en)
+- rx_hash = &tcp_conn->rx_hash;
+-
+- debug_tcp("iscsi_tcp_begin_data_in(%p, offset=%d, "
+- "datalen=%d)\n", tcp_conn,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen);
+- rc = iscsi_segment_seek_sg(&tcp_conn->in.segment,
+- sdb->table.sgl,
+- sdb->table.nents,
+- tcp_task->data_offset,
+- tcp_conn->in.datalen,
+- iscsi_tcp_process_data_in,
+- rx_hash);
+- spin_unlock(&conn->session->lock);
++ tcp_conn->in.ctask = session->cmds[itt];
++ rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
++ if (rc)
+ return rc;
+- }
+- rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
+- spin_unlock(&conn->session->lock);
+- break;
++ /* fall through */
+ case ISCSI_OP_SCSI_CMD_RSP:
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
+- rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
++
++ spin_lock(&session->lock);
++ rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
++ spin_unlock(&session->lock);
+ break;
+ case ISCSI_OP_R2T:
+- spin_lock(&conn->session->lock);
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- rc = ISCSI_ERR_BAD_ITT;
+- else if (ahslen)
++ tcp_conn->in.ctask = session->cmds[itt];
++ if (ahslen)
+ rc = ISCSI_ERR_AHSLEN;
+- else if (task->sc->sc_data_direction == DMA_TO_DEVICE)
+- rc = iscsi_r2t_rsp(conn, task);
++ else if (tcp_conn->in.ctask->sc->sc_data_direction ==
++ DMA_TO_DEVICE)
++ rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
+ else
+ rc = ISCSI_ERR_PROTO;
+- spin_unlock(&conn->session->lock);
+ break;
+ case ISCSI_OP_LOGIN_RSP:
+ case ISCSI_OP_TEXT_RSP:
+@@ -842,24 +527,18 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+- if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "iscsi_tcp: received buffer of "
+- "len %u but conn buffer is only %u "
+- "(opcode %0x)\n",
+- tcp_conn->in.datalen,
+- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
++ if (ISCSI_DEF_MAX_RECV_SEG_LEN <
++ tcp_conn->in.datalen) {
++ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
++ "but conn buffer is only %u (opcode %0x)\n",
++ tcp_conn->in.datalen,
++ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+- /* If there's data coming in with the response,
+- * receive it to the connection's buffer.
+- */
+- if (tcp_conn->in.datalen) {
+- iscsi_tcp_data_recv_prep(tcp_conn);
+- return 0;
+- }
++ if (tcp_conn->in.datalen)
++ goto copy_hdr;
+ /* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
+@@ -871,161 +550,457 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ break;
+ }
+
+- if (rc == 0) {
+- /* Anything that comes with data should have
+- * been handled above. */
+- if (tcp_conn->in.datalen)
+- return ISCSI_ERR_PROTO;
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ return rc;
++
++copy_hdr:
++ /*
++ * if we did zero copy for the header but we will need multiple
++ * skbs to complete the command then we have to copy the header
++ * for later use
++ */
++ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
++ (tcp_conn->in.datalen + tcp_conn->in.padding +
++ (conn->datadgst_en ? 4 : 0))) {
++ debug_tcp("Copying header for later use. in.copy %d in.datalen"
++ " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
++ memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_conn->in.hdr = &tcp_conn->hdr;
++ tcp_conn->in.zero_copy_hdr = 0;
+ }
++ return 0;
++}
+
+- return rc;
++/**
++ * iscsi_ctask_copy - copy skb bits to the destanation cmd task
++ * @conn: iscsi tcp connection
++ * @ctask: scsi command task
++ * @buf: buffer to copy to
++ * @buf_size: size of buffer
++ * @offset: offset within the buffer
++ *
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection and
++ * per-cmd byte counters.
++ *
++ * Read counters (in bytes):
++ *
++ * conn->in.offset offset within in progress SKB
++ * conn->in.copy left to copy from in progress SKB
++ * including padding
++ * conn->in.copied copied already from in progress SKB
++ * conn->data_copied copied already from in progress buffer
++ * ctask->sent total bytes sent up to the MidLayer
++ * ctask->data_count left to copy from in progress Data-In
++ * buf_left left to copy from in progress buffer
++ **/
++static inline int
++iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
++ void *buf, int buf_size, int offset)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int buf_left = buf_size - (tcp_conn->data_copied + offset);
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ size = min(size, ctask->data_count);
++
++ debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->in.copied);
++
++ BUG_ON(size <= 0);
++ BUG_ON(tcp_ctask->sent + size > ctask->total_length);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)buf + (offset + tcp_conn->data_copied), size);
++ /* must fit into skb->len */
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++ tcp_ctask->sent += size;
++ ctask->data_count -= size;
++
++ BUG_ON(tcp_conn->in.copy < 0);
++ BUG_ON(ctask->data_count < 0);
++
++ if (buf_size != (tcp_conn->data_copied + offset)) {
++ if (!ctask->data_count) {
++ BUG_ON(buf_size - tcp_conn->data_copied < 0);
++ /* done with this PDU */
++ return buf_size - tcp_conn->data_copied;
++ }
++ return -EAGAIN;
++ }
++
++ /* done with this buffer or with both - PDU and buffer */
++ tcp_conn->data_copied = 0;
++ return 0;
+ }
+
+ /**
+- * iscsi_tcp_hdr_recv_done - process PDU header
++ * iscsi_tcp_copy - copy skb bits to the destanation buffer
++ * @conn: iscsi tcp connection
+ *
+- * This is the callback invoked when the PDU header has
+- * been received. If the header is followed by additional
+- * header segments, we go back for more data.
+- */
+-static int
+-iscsi_tcp_hdr_recv_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
++ * Notes:
++ * The function calls skb_copy_bits() and updates per-connection
++ * byte counters.
++ **/
++static inline int
++iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
+ {
+- struct iscsi_conn *conn = tcp_conn->iscsi_conn;
+- struct iscsi_hdr *hdr;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int buf_left = buf_size - tcp_conn->data_copied;
++ int size = min(tcp_conn->in.copy, buf_left);
++ int rc;
++
++ debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
++ size, tcp_conn->in.offset, tcp_conn->data_copied);
++ BUG_ON(size <= 0);
++
++ rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
++ (char*)conn->data + tcp_conn->data_copied, size);
++ BUG_ON(rc);
++
++ tcp_conn->in.offset += size;
++ tcp_conn->in.copy -= size;
++ tcp_conn->in.copied += size;
++ tcp_conn->data_copied += size;
++
++ if (buf_size != tcp_conn->data_copied)
++ return -EAGAIN;
++
++ return 0;
++}
++
++static inline void
++partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
++ int offset, int length)
++{
++ struct scatterlist temp;
+
+- /* Check if there are additional header segments
+- * *prior* to computing the digest, because we
+- * may need to go back to the caller for more.
++ memcpy(&temp, sg, sizeof(struct scatterlist));
++ temp.offset = offset;
++ temp.length = length;
++ crypto_hash_update(desc, &temp, length);
++}
++
++static void
++iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
++{
++ struct scatterlist tmp;
++
++ sg_init_one(&tmp, buf, len);
++ crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
++}
++
++static int iscsi_scsi_data_in(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
++ struct scatterlist *sg;
++ int i, offset, rc = 0;
++
++ BUG_ON((void*)ctask != sc->SCp.ptr);
++
++ /*
++ * copying Data-In into the Scsi_Cmnd
+ */
+- hdr = (struct iscsi_hdr *) tcp_conn->in.hdr_buf;
+- if (segment->copied == sizeof(struct iscsi_hdr) && hdr->hlength) {
+- /* Bump the header length - the caller will
+- * just loop around and get the AHS for us, and
+- * call again. */
+- unsigned int ahslen = hdr->hlength << 2;
+-
+- /* Make sure we don't overflow */
+- if (sizeof(*hdr) + ahslen > sizeof(tcp_conn->in.hdr_buf))
+- return ISCSI_ERR_AHSLEN;
+-
+- segment->total_size += ahslen;
+- segment->size += ahslen;
+- return 0;
++ if (!sc->use_sg) {
++ i = ctask->data_count;
++ rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
++ sc->request_bufflen,
++ tcp_ctask->data_offset);
++ if (rc == -EAGAIN)
++ return rc;
++ if (conn->datadgst_en)
++ iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
++ i);
++ rc = 0;
++ goto done;
+ }
+
+- /* We're done processing the header. See if we're doing
+- * header digests; if so, set up the recv_digest buffer
+- * and go back for more. */
+- if (conn->hdrdgst_en) {
+- if (segment->digest_len == 0) {
+- iscsi_tcp_segment_splice_digest(segment,
+- segment->recv_digest);
+- return 0;
++ offset = tcp_ctask->data_offset;
++ sg = sc->request_buffer;
++
++ if (tcp_ctask->data_offset)
++ for (i = 0; i < tcp_ctask->sg_count; i++)
++ offset -= sg[i].length;
++ /* we've passed through partial sg*/
++ if (offset < 0)
++ offset = 0;
++
++ for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
++ char *dest;
++
++ dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
++ rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
++ sg[i].length, offset);
++ kunmap_atomic(dest, KM_SOFTIRQ0);
++ if (rc == -EAGAIN)
++ /* continue with the next SKB/PDU */
++ return rc;
++ if (!rc) {
++ if (conn->datadgst_en) {
++ if (!offset)
++ crypto_hash_update(
++ &tcp_conn->rx_hash,
++ &sg[i], sg[i].length);
++ else
++ partial_sg_digest_update(
++ &tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset + offset,
++ sg[i].length - offset);
++ }
++ offset = 0;
++ tcp_ctask->sg_count++;
+ }
+- iscsi_tcp_dgst_header(&tcp_conn->rx_hash, hdr,
+- segment->total_copied - ISCSI_DIGEST_SIZE,
+- segment->digest);
+
+- if (!iscsi_tcp_dgst_verify(tcp_conn, segment))
+- return ISCSI_ERR_HDR_DGST;
++ if (!ctask->data_count) {
++ if (rc && conn->datadgst_en)
++ /*
++ * data-in is complete, but buffer not...
++ */
++ partial_sg_digest_update(&tcp_conn->rx_hash,
++ &sg[i],
++ sg[i].offset,
++ sg[i].length-rc);
++ rc = 0;
++ break;
++ }
++
++ if (!tcp_conn->in.copy)
++ return -EAGAIN;
+ }
++ BUG_ON(ctask->data_count);
+
+- tcp_conn->in.hdr = hdr;
+- return iscsi_tcp_hdr_dissect(conn, hdr);
++done:
++ /* check for non-exceptional status */
++ if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
++ (long)sc, sc->result, ctask->itt,
++ tcp_conn->in.hdr->flags);
++ spin_lock(&conn->session->lock);
++ __iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
++ spin_unlock(&conn->session->lock);
++ }
++
++ return rc;
++}
++
++static int
++iscsi_data_recv(struct iscsi_conn *conn)
++{
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc = 0, opcode;
++
++ opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
++ switch (opcode) {
++ case ISCSI_OP_SCSI_DATA_IN:
++ rc = iscsi_scsi_data_in(conn);
++ break;
++ case ISCSI_OP_SCSI_CMD_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_ASYNC_EVENT:
++ case ISCSI_OP_REJECT:
++ /*
++ * Collect data segment to the connection's data
++ * placeholder
++ */
++ if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
++ rc = -EAGAIN;
++ goto exit;
++ }
++
++ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
++ tcp_conn->in.datalen);
++ if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
++ iscsi_recv_digest_update(tcp_conn, conn->data,
++ tcp_conn->in.datalen);
++ break;
++ default:
++ BUG_ON(1);
++ }
++exit:
++ return rc;
+ }
+
+ /**
+- * iscsi_tcp_recv - TCP receive in sendfile fashion
++ * iscsi_tcp_data_recv - TCP receive in sendfile fashion
+ * @rd_desc: read descriptor
+ * @skb: socket buffer
+ * @offset: offset in skb
+ * @len: skb->len - offset
+ **/
+ static int
+-iscsi_tcp_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
+- unsigned int offset, size_t len)
++iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
++ unsigned int offset, size_t len)
+ {
++ int rc;
+ struct iscsi_conn *conn = rd_desc->arg.data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->in.segment;
+- struct skb_seq_state seq;
+- unsigned int consumed = 0;
+- int rc = 0;
++ int processed;
++ char pad[ISCSI_PAD_LEN];
++ struct scatterlist sg;
+
+- debug_tcp("in %d bytes\n", skb->len - offset);
++ /*
++ * Save current SKB and its offset in the corresponding
++ * connection context.
++ */
++ tcp_conn->in.copy = skb->len - offset;
++ tcp_conn->in.offset = offset;
++ tcp_conn->in.skb = skb;
++ tcp_conn->in.len = tcp_conn->in.copy;
++ BUG_ON(tcp_conn->in.copy <= 0);
++ debug_tcp("in %d bytes\n", tcp_conn->in.copy);
++
++more:
++ tcp_conn->in.copied = 0;
++ rc = 0;
+
+ if (unlikely(conn->suspend_rx)) {
+ debug_tcp("conn %d Rx suspended!\n", conn->id);
+ return 0;
+ }
+
+- skb_prepare_seq_read(skb, offset, skb->len, &seq);
+- while (1) {
+- unsigned int avail;
+- const u8 *ptr;
++ if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
++ tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
++ rc = iscsi_hdr_extract(tcp_conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto nomore;
++ else {
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ }
+
+- avail = skb_seq_read(consumed, &ptr, &seq);
+- if (avail == 0) {
+- debug_tcp("no more data avail. Consumed %d\n",
+- consumed);
+- break;
++ /*
++ * Verify and process incoming PDU header.
++ */
++ rc = iscsi_tcp_hdr_recv(conn);
++ if (!rc && tcp_conn->in.datalen) {
++ if (conn->datadgst_en)
++ crypto_hash_init(&tcp_conn->rx_hash);
++ tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
++ } else if (rc) {
++ iscsi_conn_failure(conn, rc);
++ return 0;
+ }
+- BUG_ON(segment->copied >= segment->size);
+-
+- debug_tcp("skb %p ptr=%p avail=%u\n", skb, ptr, avail);
+- rc = iscsi_tcp_segment_recv(tcp_conn, segment, ptr, avail);
+- BUG_ON(rc == 0);
+- consumed += rc;
+-
+- if (segment->total_copied >= segment->total_size) {
+- debug_tcp("segment done\n");
+- rc = segment->done(tcp_conn, segment);
+- if (rc != 0) {
+- skb_abort_seq_read(&seq);
+- goto error;
+- }
++ }
++
++ if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
++ uint32_t recv_digest;
+
+- /* The done() functions sets up the
+- * next segment. */
++ debug_tcp("extra data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++ rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++
++ memcpy(&recv_digest, conn->data, sizeof(uint32_t));
++ if (recv_digest != tcp_conn->in.datadgst) {
++ debug_tcp("iscsi_tcp: data digest error!"
++ "0x%x != 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
++ return 0;
++ } else {
++ debug_tcp("iscsi_tcp: data digest match!"
++ "0x%x == 0x%x\n", recv_digest,
++ tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
+ }
+ }
+- skb_abort_seq_read(&seq);
+- conn->rxdata_octets += consumed;
+- return consumed;
+
+-error:
+- debug_tcp("Error receiving PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return 0;
++ if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
++ tcp_conn->in.copy) {
++
++ debug_tcp("data_recv offset %d copy %d\n",
++ tcp_conn->in.offset, tcp_conn->in.copy);
++
++ rc = iscsi_data_recv(conn);
++ if (rc) {
++ if (rc == -EAGAIN)
++ goto again;
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return 0;
++ }
++ tcp_conn->in.copy -= tcp_conn->in.padding;
++ tcp_conn->in.offset += tcp_conn->in.padding;
++ if (conn->datadgst_en) {
++ if (tcp_conn->in.padding) {
++ debug_tcp("padding -> %d\n",
++ tcp_conn->in.padding);
++ memset(pad, 0, tcp_conn->in.padding);
++ sg_init_one(&sg, pad, tcp_conn->in.padding);
++ crypto_hash_update(&tcp_conn->rx_hash,
++ &sg, sg.length);
++ }
++ crypto_hash_final(&tcp_conn->rx_hash,
++ (u8 *) &tcp_conn->in.datadgst);
++ debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
++ tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
++ tcp_conn->data_copied = 0;
++ } else
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ }
++
++ debug_tcp("f, processed %d from out of %d padding %d\n",
++ tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
++ BUG_ON(tcp_conn->in.offset - offset > len);
++
++ if (tcp_conn->in.offset - offset != len) {
++ debug_tcp("continue to process %d bytes\n",
++ (int)len - (tcp_conn->in.offset - offset));
++ goto more;
++ }
++
++nomore:
++ processed = tcp_conn->in.offset - offset;
++ BUG_ON(processed == 0);
++ return processed;
++
++again:
++ processed = tcp_conn->in.offset - offset;
++ debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
++ processed, (int)len, (int)rd_desc->count);
++ BUG_ON(processed == 0);
++ BUG_ON(processed > len);
++
++ conn->rxdata_octets += processed;
++ return processed;
+ }
+
+ static void
+ iscsi_tcp_data_ready(struct sock *sk, int flag)
+ {
+ struct iscsi_conn *conn = sk->sk_user_data;
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ read_descriptor_t rd_desc;
+
+ read_lock(&sk->sk_callback_lock);
+
+ /*
+- * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
++ * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
+ * We set count to 1 because we want the network layer to
+- * hand us all the skbs that are available. iscsi_tcp_recv
++ * hand us all the skbs that are available. iscsi_tcp_data_recv
+ * handled pdus that cross buffers or pdus that still need data.
+ */
+ rd_desc.arg.data = conn;
+ rd_desc.count = 1;
+- tcp_read_sock(sk, &rd_desc, iscsi_tcp_recv);
++ tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
+
+ read_unlock(&sk->sk_callback_lock);
+-
+- /* If we had to (atomically) map a highmem page,
+- * unmap it now. */
+- iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+ }
+
+ static void
+@@ -1105,179 +1080,127 @@ iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
+ }
+
+ /**
+- * iscsi_xmit - TCP transmit
+- **/
+-static int
+-iscsi_xmit(struct iscsi_conn *conn)
++ * iscsi_send - generic send routine
++ * @sk: kernel's socket
++ * @buf: buffer to write from
++ * @size: actual size to write
++ * @flags: socket's flags
++ */
++static inline int
++iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
+ {
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+- unsigned int consumed = 0;
+- int rc = 0;
+-
+- while (1) {
+- rc = iscsi_tcp_xmit_segment(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- if (rc == 0)
+- break;
+-
+- consumed += rc;
++ struct socket *sk = tcp_conn->sock;
++ int offset = buf->sg.offset + buf->sent, res;
+
+- if (segment->total_copied >= segment->total_size) {
+- if (segment->done != NULL) {
+- rc = segment->done(tcp_conn, segment);
+- if (rc < 0)
+- goto error;
+- }
+- }
++ /*
++ * if we got use_sg=0 or are sending something we kmallocd
++ * then we did not have to do kmap (kmap returns page_address)
++ *
++ * if we got use_sg > 0, but had to drop down, we do not
++ * set clustering so this should only happen for that
++ * slab case.
++ */
++ if (buf->use_sendmsg)
++ res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
++ else
++ res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);
++
++ if (res >= 0) {
++ conn->txdata_octets += res;
++ buf->sent += res;
++ return res;
+ }
+
+- debug_tcp("xmit %d bytes\n", consumed);
+-
+- conn->txdata_octets += consumed;
+- return consumed;
+-
+-error:
+- /* Transmit error. We could initiate error recovery
+- * here. */
+- debug_tcp("Error sending PDU, errno=%d\n", rc);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- return rc;
++ tcp_conn->sendpage_failures_cnt++;
++ if (res == -EAGAIN)
++ res = -ENOBUFS;
++ else
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ return res;
+ }
+
+ /**
+- * iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+- */
+-static inline int
+-iscsi_tcp_xmit_qlen(struct iscsi_conn *conn)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct iscsi_segment *segment = &tcp_conn->out.segment;
+-
+- return segment->total_copied - segment->total_size;
+-}
+-
++ * iscsi_sendhdr - send PDU Header via tcp_sendpage()
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @datalen: length of data to be sent after the header
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
+ static inline int
+-iscsi_tcp_flush(struct iscsi_conn *conn)
++iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
+ {
+- int rc;
+-
+- while (iscsi_tcp_xmit_qlen(conn)) {
+- rc = iscsi_xmit(conn);
+- if (rc == 0)
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (buf->sent + size != buf->sg.length || datalen)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
++ if (res >= 0) {
++ if (size != res)
+ return -EAGAIN;
+- if (rc < 0)
+- return rc;
++ return 0;
+ }
+
+- return 0;
+-}
+-
+-/*
+- * This is called when we're done sending the header.
+- * Simply copy the data_segment to the send segment, and return.
+- */
+-static int
+-iscsi_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
+- struct iscsi_segment *segment)
+-{
+- tcp_conn->out.segment = tcp_conn->out.data_segment;
+- debug_tcp("Header done. Next segment size %u total_size %u\n",
+- tcp_conn->out.segment.size, tcp_conn->out.segment.total_size);
+- return 0;
++ return res;
+ }
+
+-static void
+-iscsi_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr, size_t hdrlen)
++/**
++ * iscsi_sendpage - send one page of iSCSI Data-Out.
++ * @conn: iscsi connection
++ * @buf: buffer to write from
++ * @count: remaining data
++ * @sent: number of bytes sent
++ *
++ * Notes:
++ * (Tx, Fast Path)
++ **/
++static inline int
++iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
++ int *count, int *sent)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+-
+- debug_tcp("%s(%p%s)\n", __func__, tcp_conn,
+- conn->hdrdgst_en? ", digest enabled" : "");
+-
+- /* Clear the data segment - needs to be filled in by the
+- * caller using iscsi_tcp_send_data_prep() */
+- memset(&tcp_conn->out.data_segment, 0, sizeof(struct iscsi_segment));
+-
+- /* If header digest is enabled, compute the CRC and
+- * place the digest into the same buffer. We make
+- * sure that both iscsi_tcp_task and mtask have
+- * sufficient room.
+- */
+- if (conn->hdrdgst_en) {
+- iscsi_tcp_dgst_header(&tcp_conn->tx_hash, hdr, hdrlen,
+- hdr + hdrlen);
+- hdrlen += ISCSI_DIGEST_SIZE;
++ int flags = 0; /* MSG_DONTWAIT; */
++ int res, size;
++
++ size = buf->sg.length - buf->sent;
++ BUG_ON(buf->sent + size > buf->sg.length);
++ if (size > *count)
++ size = *count;
++ if (buf->sent + size != buf->sg.length || *count != size)
++ flags |= MSG_MORE;
++
++ res = iscsi_send(conn, buf, size, flags);
++ debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
++ size, buf->sent, *count, *sent, res);
++ if (res >= 0) {
++ *count -= res;
++ *sent += res;
++ if (size != res)
++ return -EAGAIN;
++ return 0;
+ }
+
+- /* Remember header pointer for later, when we need
+- * to decide whether there's a payload to go along
+- * with the header. */
+- tcp_conn->out.hdr = hdr;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.segment, hdr, hdrlen,
+- iscsi_tcp_send_hdr_done, NULL);
++ return res;
+ }
+
+-/*
+- * Prepare the send buffer for the payload data.
+- * Padding and checksumming will all be taken care
+- * of by the iscsi_segment routines.
+- */
+-static int
+-iscsi_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
+- unsigned int count, unsigned int offset,
+- unsigned int len)
+-{
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, offset=%d, datalen=%d%s)\n", __func__,
+- tcp_conn, offset, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- return iscsi_segment_seek_sg(&tcp_conn->out.data_segment,
+- sg, count, offset, len,
+- NULL, tx_hash);
+-}
+-
+-static void
+-iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+- size_t len)
++static inline void
++iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
+ {
+- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct hash_desc *tx_hash = NULL;
+- unsigned int hdr_spec_len;
+-
+- debug_tcp("%s(%p, datalen=%d%s)\n", __func__, tcp_conn, len,
+- conn->datadgst_en? ", digest enabled" : "");
+-
+- /* Make sure the datalen matches what the caller
+- said he would send. */
+- hdr_spec_len = ntoh24(tcp_conn->out.hdr->dlength);
+- WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
+-
+- if (conn->datadgst_en)
+- tx_hash = &tcp_conn->tx_hash;
+-
+- iscsi_segment_init_linear(&tcp_conn->out.data_segment,
+- data, len, NULL, tx_hash);
++ crypto_hash_init(&tcp_conn->tx_hash);
++ tcp_ctask->digest_count = 4;
+ }
+
+ /**
+ * iscsi_solicit_data_cont - initialize next Data-Out
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @r2t: R2T info
+ * @left: bytes left to transfer
+ *
+@@ -1287,17 +1210,13 @@ iscsi_tcp_send_linear_data_prepare(struct iscsi_conn *conn, void *data,
+ *
+ * Called under connection lock.
+ **/
+-static int
+-iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+- struct iscsi_r2t_info *r2t)
++static void
++iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_r2t_info *r2t, int left)
+ {
+ struct iscsi_data *hdr;
+- int new_offset, left;
+-
+- BUG_ON(r2t->data_length - r2t->sent < 0);
+- left = r2t->data_length - r2t->sent;
+- if (left == 0)
+- return 0;
++ struct scsi_cmnd *sc = ctask->sc;
++ int new_offset;
+
+ hdr = &r2t->dtask.hdr;
+ memset(hdr, 0, sizeof(struct iscsi_data));
+@@ -1305,8 +1224,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
+ r2t->solicit_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->itt = task->hdr->itt;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = r2t->exp_statsn;
+ new_offset = r2t->data_offset + r2t->sent;
+ hdr->offset = cpu_to_be32(new_offset);
+@@ -1318,177 +1237,514 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_task *task,
+ r2t->data_count = left;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+-
+ conn->dataout_pdus_cnt++;
+- return 1;
++
++ iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
++ sizeof(struct iscsi_hdr));
++
++ if (iscsi_buf_left(&r2t->sendbuf))
++ return;
++
++ if (sc->use_sg) {
++ iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
++ r2t->sg += 1;
++ } else {
++ iscsi_buf_init_iov(&r2t->sendbuf,
++ (char*)sc->request_buffer + new_offset,
++ r2t->data_count);
++ r2t->sg = NULL;
++ }
++}
++
++static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
++ unsigned long len)
++{
++ tcp_ctask->pad_count = len & (ISCSI_PAD_LEN - 1);
++ if (!tcp_ctask->pad_count)
++ return;
++
++ tcp_ctask->pad_count = ISCSI_PAD_LEN - tcp_ctask->pad_count;
++ debug_scsi("write padding %d bytes\n", tcp_ctask->pad_count);
++ tcp_ctask->xmstate |= XMSTATE_W_PAD;
+ }
+
+ /**
+- * iscsi_tcp_task - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
++ * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ * @conn: iscsi connection
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @sc: scsi command
+ **/
++static void
++iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
++{
++ struct scsi_cmnd *sc = ctask->sc;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++
++ BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
++
++ tcp_ctask->sent = 0;
++ tcp_ctask->sg_count = 0;
++
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ tcp_ctask->xmstate = XMSTATE_W_HDR;
++ tcp_ctask->exp_r2tsn = 0;
++ BUG_ON(ctask->total_length == 0);
++
++ if (sc->use_sg) {
++ struct scatterlist *sg = sc->request_buffer;
++
++ iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
++ tcp_ctask->sg = sg + 1;
++ tcp_ctask->bad_sg = sg + sc->use_sg;
++ } else {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf,
++ sc->request_buffer,
++ sc->request_bufflen);
++ tcp_ctask->sg = NULL;
++ tcp_ctask->bad_sg = NULL;
++ }
++ debug_scsi("cmd [itt 0x%x total %d imm_data %d "
++ "unsol count %d, unsol offset %d]\n",
++ ctask->itt, ctask->total_length, ctask->imm_count,
++ ctask->unsol_count, ctask->unsol_offset);
++ } else
++ tcp_ctask->xmstate = XMSTATE_R_HDR;
++
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
++ sizeof(struct iscsi_hdr));
++}
++
++/**
++ * iscsi_tcp_mtask_xmit - xmit management(immediate) task
++ * @conn: iscsi connection
++ * @mtask: task management task
++ *
++ * Notes:
++ * The function can return -EAGAIN in which case caller must
++ * call it again later, or recover. '0' return code means successful
++ * xmit.
++ *
++ * Management xmit state machine consists of two states:
++ * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
++ * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
++ **/
+ static int
+-iscsi_tcp_task_init(struct iscsi_task *task)
++iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
+ {
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct iscsi_conn *conn = task->conn;
+- struct scsi_cmnd *sc = task->sc;
+- int err;
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++ int rc;
+
+- if (!sc) {
+- /*
+- * mgmt tasks do not have a scatterlist since they come
+- * in from the iscsi interface.
++ debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
++ conn->id, tcp_mtask->xmstate, mtask->itt);
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
++ conn->stop_stage != STOP_CONN_RECOVER &&
++ conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
++ (u8*)tcp_mtask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
++ mtask->data_count);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
++ if (mtask->data_count)
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ return rc;
++ }
++ }
++
++ if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
++ BUG_ON(!mtask->data_count);
++ tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
++ /* FIXME: implement.
++ * Virtual buffer could be spread across multiple pages...
+ */
+- debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id,
+- task->itt);
++ do {
++ int rc;
++
++ rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
++ &mtask->data_count, &tcp_mtask->sent);
++ if (rc) {
++ tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
++ return rc;
++ }
++ } while (mtask->data_count);
++ }
+
+- /* Prepare PDU, optionally w/ immediate data */
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, sizeof(*task->hdr));
++ BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
+
+- /* If we have immediate data, attach a payload */
+- if (task->data_count)
+- iscsi_tcp_send_linear_data_prepare(conn, task->data,
+- task->data_count);
+- return 0;
++ spin_lock_bh(&session->lock);
++ list_del(&conn->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
++ sizeof(void*));
++ spin_unlock_bh(&session->lock);
++ }
++ return 0;
++}
++
++static inline int
++iscsi_send_read_hdr(struct iscsi_conn *conn,
++ struct iscsi_tcp_cmd_task *tcp_ctask)
++{
++ int rc;
++
++ tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
++ if (!rc) {
++ BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
++ return 0; /* wait for Data-In */
+ }
++ tcp_ctask->xmstate |= XMSTATE_R_HDR;
++ return rc;
++}
+
+- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+- tcp_task->sent = 0;
+- tcp_task->exp_datasn = 0;
++static inline int
++iscsi_send_write_hdr(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- /* Prepare PDU, optionally w/ immediate data */
+- debug_scsi("task deq [cid %d itt 0x%x imm %d unsol %d]\n",
+- conn->id, task->itt, task->imm_count,
+- task->unsol_count);
+- iscsi_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);
++ tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)tcp_ctask->hdrext);
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
++ if (rc) {
++ tcp_ctask->xmstate |= XMSTATE_W_HDR;
++ return rc;
++ }
+
+- if (!task->imm_count)
+- return 0;
++ if (ctask->imm_count) {
++ tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
++ iscsi_set_padding(tcp_ctask, ctask->imm_count);
+
+- /* If we have immediate data, attach a payload */
+- err = iscsi_tcp_send_data_prep(conn, scsi_out(sc)->table.sgl,
+- scsi_out(sc)->table.nents,
+- 0, task->imm_count);
+- if (err)
+- return err;
+- tcp_task->sent += task->imm_count;
+- task->imm_count = 0;
++ if (ctask->conn->datadgst_en) {
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ tcp_ctask->immdigest = 0;
++ }
++ }
++
++ if (ctask->unsol_count)
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
+ return 0;
+ }
+
+-/*
+- * iscsi_tcp_task_xmit - xmit normal PDU task
+- * @task: iscsi command task
+- *
+- * We're expected to return 0 when everything was transmitted succesfully,
+- * -EAGAIN if there's still data in the queue, or != 0 for any other kind
+- * of error.
+- */
+ static int
+-iscsi_tcp_task_xmit(struct iscsi_task *task)
++iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
+- struct scsi_cmnd *sc = task->sc;
+- struct scsi_data_buffer *sdb;
+- int rc = 0;
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int sent = 0, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
++ iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
++ tcp_ctask->pad_count);
++ if (conn->datadgst_en)
++ crypto_hash_update(&tcp_conn->tx_hash,
++ &tcp_ctask->sendbuf.sg,
++ tcp_ctask->sendbuf.sg.length);
++ } else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
++ return 0;
+
+-flush:
+- /* Flush any pending data first. */
+- rc = iscsi_tcp_flush(conn);
+- if (rc < 0)
+- return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
++ debug_scsi("sending %d pad bytes for itt 0x%x\n",
++ tcp_ctask->pad_count, ctask->itt);
++ rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
++ &sent);
++ if (rc) {
++ debug_scsi("padding send failed %d\n", rc);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
++ }
++ return rc;
++}
+
+- /* mgmt command */
+- if (!sc) {
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++static int
++iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ struct iscsi_buf *buf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask;
++ struct iscsi_tcp_conn *tcp_conn;
++ int rc, sent = 0;
++
++ if (!conn->datadgst_en)
+ return 0;
++
++ tcp_ctask = ctask->dd_data;
++ tcp_conn = conn->dd_data;
++
++ if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
++ crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
++ iscsi_buf_init_iov(buf, (char*)digest, 4);
+ }
++ tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;
++
++ rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
++ if (!rc)
++ debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
++ ctask->itt);
++ else {
++ debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
++ *digest, ctask->itt);
++ tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
++ }
++ return rc;
++}
+
+- /* Are we done already? */
+- if (sc->sc_data_direction != DMA_TO_DEVICE)
+- return 0;
++static int
++iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
++ struct scatterlist **sg, int *sent, int *count,
++ struct iscsi_buf *digestbuf, uint32_t *digest)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ int rc, buf_sent, offset;
++
++ while (*count) {
++ buf_sent = 0;
++ offset = sendbuf->sent;
++
++ rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
++ *sent = *sent + buf_sent;
++ if (buf_sent && conn->datadgst_en)
++ partial_sg_digest_update(&tcp_conn->tx_hash,
++ &sendbuf->sg, sendbuf->sg.offset + offset,
++ buf_sent);
++ if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
++ iscsi_buf_init_sg(sendbuf, *sg);
++ *sg = *sg + 1;
++ }
+
+- sdb = scsi_out(sc);
+- if (task->unsol_count != 0) {
+- struct iscsi_data *hdr = &tcp_task->unsol_dtask.hdr;
++ if (rc)
++ return rc;
++ }
+
+- /* Prepare a header for the unsolicited PDU.
+- * The amount of data we want to send will be
+- * in task->data_count.
+- * FIXME: return the data count instead.
+- */
+- iscsi_prep_unsolicit_data_pdu(task, hdr);
++ rc = iscsi_send_padding(conn, ctask);
++ if (rc)
++ return rc;
++
++ return iscsi_send_digest(conn, ctask, digestbuf, digest);
++}
++
++static int
++iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_data_task *dtask;
++ int rc;
++
++ tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
++ if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
++ dtask = &tcp_ctask->unsol_dtask;
++
++ iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
++ iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
++ sizeof(struct iscsi_hdr));
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
++ (u8*)dtask->hdrext);
++
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
++ iscsi_set_padding(tcp_ctask, ctask->data_count);
++ }
++
++ rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
++ return rc;
++ }
++
++ if (conn->datadgst_en) {
++ dtask = &tcp_ctask->unsol_dtask;
++ iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
++ }
++
++ debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
++ ctask->itt, ctask->unsol_count, tcp_ctask->sent);
++ return 0;
++}
+
+- debug_tcp("unsol dout [itt 0x%x doff %d dlen %d]\n",
+- task->itt, tcp_task->sent, task->data_count);
++static int
++iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc;
+
+- iscsi_tcp_send_hdr_prep(conn, hdr, sizeof(*hdr));
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents, tcp_task->sent,
+- task->data_count);
++ if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
++ BUG_ON(!ctask->unsol_count);
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
++send_hdr:
++ rc = iscsi_send_unsol_hdr(conn, ctask);
+ if (rc)
+- goto fail;
+- tcp_task->sent += task->data_count;
+- task->unsol_count -= task->data_count;
+- goto flush;
+- } else {
+- struct iscsi_session *session = conn->session;
+- struct iscsi_r2t_info *r2t;
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
++ struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
++ int start = tcp_ctask->sent;
+
+- /* All unsolicited PDUs sent. Check for solicited PDUs.
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->data_count,
++ &dtask->digestbuf, &dtask->digest);
++ ctask->unsol_count -= tcp_ctask->sent - start;
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
++ /*
++ * Done with the Data-Out. Next, check if we need
++ * to send another unsolicited Data-Out.
+ */
+- spin_lock_bh(&session->lock);
+- r2t = tcp_task->r2t;
+- if (r2t != NULL) {
+- /* Continue with this R2T? */
+- if (!iscsi_solicit_data_cont(conn, task, r2t)) {
+- debug_scsi(" done with r2t %p\n", r2t);
+-
+- __kfifo_put(tcp_task->r2tpool.queue,
+- (void*)&r2t, sizeof(void*));
+- tcp_task->r2t = r2t = NULL;
+- }
++ if (ctask->unsol_count) {
++ debug_scsi("sending more uns\n");
++ tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
++ goto send_hdr;
+ }
++ }
++ return 0;
++}
+
+- if (r2t == NULL) {
+- __kfifo_get(tcp_task->r2tqueue, (void*)&tcp_task->r2t,
++static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ struct iscsi_session *session = conn->session;
++ struct iscsi_r2t_info *r2t;
++ struct iscsi_data_task *dtask;
++ int left, rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ if (!tcp_ctask->r2t) {
++ spin_lock_bh(&session->lock);
++ __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
+ sizeof(void*));
+- r2t = tcp_task->r2t;
++ spin_unlock_bh(&session->lock);
++ }
++send_hdr:
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
++
++ if (conn->hdrdgst_en)
++ iscsi_hdr_digest(conn, &r2t->headbuf,
++ (u8*)dtask->hdrext);
++ rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
++ if (rc) {
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
++ return rc;
+ }
+- spin_unlock_bh(&session->lock);
+
+- /* Waiting for more R2Ts to arrive. */
+- if (r2t == NULL) {
+- debug_tcp("no R2Ts yet\n");
+- return 0;
++ if (conn->datadgst_en) {
++ iscsi_data_digest_init(conn->dd_data, tcp_ctask);
++ dtask->digest = 0;
+ }
+
+- debug_scsi("sol dout %p [dsn %d itt 0x%x doff %d dlen %d]\n",
+- r2t, r2t->solicit_datasn - 1, task->itt,
+- r2t->data_offset + r2t->sent, r2t->data_count);
++ iscsi_set_padding(tcp_ctask, r2t->data_count);
++ debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
++ r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
++ r2t->sent);
++ }
+
+- iscsi_tcp_send_hdr_prep(conn, &r2t->dtask.hdr,
+- sizeof(struct iscsi_hdr));
++ if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
++ r2t = tcp_ctask->r2t;
++ dtask = &r2t->dtask;
+
+- rc = iscsi_tcp_send_data_prep(conn, sdb->table.sgl,
+- sdb->table.nents,
+- r2t->data_offset + r2t->sent,
+- r2t->data_count);
++ rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
++ &r2t->sent, &r2t->data_count,
++ &dtask->digestbuf, &dtask->digest);
+ if (rc)
+- goto fail;
+- tcp_task->sent += r2t->data_count;
+- r2t->sent += r2t->data_count;
+- goto flush;
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
++
++ /*
++ * Done with this Data-Out. Next, check if we have
++ * to send another Data-Out for this R2T.
++ */
++ BUG_ON(r2t->data_length - r2t->sent < 0);
++ left = r2t->data_length - r2t->sent;
++ if (left) {
++ iscsi_solicit_data_cont(conn, ctask, r2t, left);
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ goto send_hdr;
++ }
++
++ /*
++ * Done with this R2T. Check if there are more
++ * outstanding R2Ts ready to be processed.
++ */
++ spin_lock_bh(&session->lock);
++ tcp_ctask->r2t = NULL;
++ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
++ sizeof(void*));
++ if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
++ sizeof(void*))) {
++ tcp_ctask->r2t = r2t;
++ tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
++ tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
++ spin_unlock_bh(&session->lock);
++ goto send_hdr;
++ }
++ spin_unlock_bh(&session->lock);
+ }
+ return 0;
+-fail:
+- iscsi_conn_failure(conn, rc);
+- return -EIO;
++}
++
++static int
++iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
++{
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
++ int rc = 0;
++
++ debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
++ conn->id, tcp_ctask->xmstate, ctask->itt);
++
++ /*
++ * serialize with TMF AbortTask
++ */
++ if (ctask->mtask)
++ return rc;
++
++ if (tcp_ctask->xmstate & XMSTATE_R_HDR)
++ return iscsi_send_read_hdr(conn, tcp_ctask);
++
++ if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
++ rc = iscsi_send_write_hdr(conn, ctask);
++ if (rc)
++ return rc;
++ }
++
++ if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
++ rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
++ &tcp_ctask->sent, &ctask->imm_count,
++ &tcp_ctask->immbuf, &tcp_ctask->immdigest);
++ if (rc)
++ return rc;
++ tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
++ }
++
++ rc = iscsi_send_unsol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ rc = iscsi_send_sol_pdu(conn, ctask);
++ if (rc)
++ return rc;
++
++ return rc;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -1498,7 +1754,7 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_tcp_conn *tcp_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*tcp_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -1508,31 +1764,45 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+- tcp_conn = conn->dd_data;
++ tcp_conn = kzalloc(sizeof(*tcp_conn), GFP_KERNEL);
++ if (!tcp_conn)
++ goto tcp_conn_alloc_fail;
++
++ conn->dd_data = tcp_conn;
+ tcp_conn->iscsi_conn = conn;
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++ /* initial operational parameters */
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+
+ tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->tx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->tx_hash.tfm))
+- goto free_conn;
++ if (IS_ERR(tcp_conn->tx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->tx_hash.tfm));
++ goto free_tcp_conn;
++ }
+
+ tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
+ CRYPTO_ALG_ASYNC);
+ tcp_conn->rx_hash.flags = 0;
+- if (IS_ERR(tcp_conn->rx_hash.tfm))
++ if (IS_ERR(tcp_conn->rx_hash.tfm)) {
++ printk(KERN_ERR "Could not create connection due to crc32c "
++ "loading error %ld. Make sure the crc32c module is "
++ "built as a module or into the kernel\n",
++ PTR_ERR(tcp_conn->rx_hash.tfm));
+ goto free_tx_tfm;
++ }
+
+ return cls_conn;
+
+ free_tx_tfm:
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+-free_conn:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Could not create connection due to crc32c "
+- "loading error. Make sure the crc32c "
+- "module is built as a module or into the "
+- "kernel\n");
++free_tcp_conn:
++ kfree(tcp_conn);
++tcp_conn_alloc_fail:
+ iscsi_conn_teardown(cls_conn);
+ return NULL;
+ }
+@@ -1540,21 +1810,18 @@ free_conn:
+ static void
+ iscsi_tcp_release_conn(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct socket *sock = tcp_conn->sock;
+
+- if (!sock)
++ if (!tcp_conn->sock)
+ return;
+
+- sock_hold(sock->sk);
++ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+- sock_put(sock->sk);
++ sock_put(tcp_conn->sock->sk);
+
+- spin_lock_bh(&session->lock);
++ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+- spin_unlock_bh(&session->lock);
+- sockfd_put(sock);
++ conn->recv_lock = NULL;
+ }
+
+ static void
+@@ -1564,13 +1831,14 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ iscsi_tcp_release_conn(conn);
++ iscsi_conn_teardown(cls_conn);
+
+ if (tcp_conn->tx_hash.tfm)
+ crypto_free_hash(tcp_conn->tx_hash.tfm);
+ if (tcp_conn->rx_hash.tfm)
+ crypto_free_hash(tcp_conn->rx_hash.tfm);
+
+- iscsi_conn_teardown(cls_conn);
++ kfree(tcp_conn);
+ }
+
+ static void
+@@ -1579,60 +1847,9 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+- /* userspace may have goofed up and not bound us */
+- if (!tcp_conn->sock)
+- return;
+- /*
+- * Make sure our recv side is stopped.
+- * Older tools called conn stop before ep_disconnect
+- * so IO could still be coming in.
+- */
+- write_lock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+- write_unlock_bh(&tcp_conn->sock->sk->sk_callback_lock);
+-
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+-}
+-
+-static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
+- char *buf, int *port,
+- int (*getname)(struct socket *, struct sockaddr *,
+- int *addrlen))
+-{
+- struct sockaddr_storage *addr;
+- struct sockaddr_in6 *sin6;
+- struct sockaddr_in *sin;
+- int rc = 0, len;
+-
+- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+- if (!addr)
+- return -ENOMEM;
+-
+- if (getname(sock, (struct sockaddr *) addr, &len)) {
+- rc = -ENODEV;
+- goto free_addr;
+- }
+-
+- switch (addr->ss_family) {
+- case AF_INET:
+- sin = (struct sockaddr_in *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
+- *port = be16_to_cpu(sin->sin_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- case AF_INET6:
+- sin6 = (struct sockaddr_in6 *)addr;
+- spin_lock_bh(&conn->session->lock);
+- sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
+- *port = be16_to_cpu(sin6->sin6_port);
+- spin_unlock_bh(&conn->session->lock);
+- break;
+- }
+-free_addr:
+- kfree(addr);
+- return rc;
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
+ }
+
+ static int
+@@ -1640,8 +1857,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
+ int is_leading)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct sock *sk;
+@@ -1651,28 +1866,13 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ /* lookup for existing socket */
+ sock = sockfd_lookup((int)transport_eph, &err);
+ if (!sock) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "sockfd_lookup failed %d\n", err);
++ printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
+ return -EEXIST;
+ }
+- /*
+- * copy these values now because if we drop the session
+- * userspace may still want to query the values since we will
+- * be using them for the reconnect
+- */
+- err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
+- &conn->portal_port, kernel_getpeername);
+- if (err)
+- goto free_socket;
+-
+- err = iscsi_tcp_get_addr(conn, sock, ihost->local_address,
+- &ihost->local_port, kernel_getsockname);
+- if (err)
+- goto free_socket;
+
+ err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+ if (err)
+- goto free_socket;
++ return err;
+
+ /* bind iSCSI connection and socket */
+ tcp_conn->sock = sock;
+@@ -1683,17 +1883,38 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
+ sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
+ sk->sk_allocation = GFP_ATOMIC;
+
++ /* FIXME: disable Nagle's algorithm */
++
++ /*
++ * Intercept TCP callbacks for sendfile like receive
++ * processing.
++ */
++ conn->recv_lock = &sk->sk_callback_lock;
+ iscsi_conn_set_callbacks(conn);
+ tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
+ /*
+ * set receive state machine into initial state
+ */
+- iscsi_tcp_hdr_recv_prep(tcp_conn);
++ tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
++
+ return 0;
++}
+
+-free_socket:
+- sockfd_put(sock);
+- return err;
++/* called with host lock */
++static void
++iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
++ sizeof(struct iscsi_hdr));
++ tcp_mtask->xmstate = XMSTATE_IMM_HDR;
++ tcp_mtask->sent = 0;
++
++ if (mtask->data_count)
++ iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
++ mtask->data_count);
+ }
+
+ static int
+@@ -1706,8 +1927,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ * initialize per-task: R2T pool and xmit queue
+ */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+ /*
+ * pre-allocated x4 as much r2ts to handle race when
+@@ -1716,16 +1937,18 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+ */
+
+ /* R2T pool */
+- if (iscsi_pool_init(&tcp_task->r2tpool, session->max_r2t * 4, NULL,
++ if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
++ (void***)&tcp_ctask->r2ts,
+ sizeof(struct iscsi_r2t_info))) {
+ goto r2t_alloc_fail;
+ }
+
+ /* R2T xmit queue */
+- tcp_task->r2tqueue = kfifo_alloc(
++ tcp_ctask->r2tqueue = kfifo_alloc(
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
+- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+- iscsi_pool_free(&tcp_task->r2tpool);
++ if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ goto r2t_alloc_fail;
+ }
+ }
+@@ -1734,11 +1957,12 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
+
+ r2t_alloc_fail:
+ for (i = 0; i < cmd_i; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ return -ENOMEM;
+ }
+@@ -1749,11 +1973,12 @@ iscsi_r2tpool_free(struct iscsi_session *session)
+ int i;
+
+ for (i = 0; i < session->cmds_max; i++) {
+- struct iscsi_task *task = session->cmds[i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- kfifo_free(tcp_task->r2tqueue);
+- iscsi_pool_free(&tcp_task->r2tpool);
++ kfifo_free(tcp_ctask->r2tqueue);
++ iscsi_pool_free(&tcp_ctask->r2tpool,
++ (void**)tcp_ctask->r2ts);
+ }
+ }
+
+@@ -1769,6 +1994,9 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ switch(param) {
+ case ISCSI_PARAM_HDRDGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
++ if (conn->hdrdgst_en)
++ tcp_conn->hdr_size += sizeof(__u32);
+ break;
+ case ISCSI_PARAM_DATADGST_EN:
+ iscsi_set_param(cls_conn, param, buf, buflen);
+@@ -1777,12 +2005,12 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
+ break;
+ case ISCSI_PARAM_MAX_R2T:
+ sscanf(buf, "%d", &value);
+- if (value <= 0 || !is_power_of_2(value))
+- return -EINVAL;
+- if (session->max_r2t == value)
++ if (session->max_r2t == roundup_pow_of_two(value))
+ break;
+ iscsi_r2tpool_free(session);
+ iscsi_set_param(cls_conn, param, buf, buflen);
++ if (session->max_r2t & (session->max_r2t - 1))
++ session->max_r2t = roundup_pow_of_two(session->max_r2t);
+ if (iscsi_r2tpool_alloc(session))
+ return -ENOMEM;
+ break;
+@@ -1798,18 +2026,41 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
++ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
++ struct inet_sock *inet;
++ struct ipv6_pinfo *np;
++ struct sock *sk;
+ int len;
+
+ switch(param) {
+ case ISCSI_PARAM_CONN_PORT:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%hu\n", conn->portal_port);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ inet = inet_sk(tcp_conn->sock->sk);
++ len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ case ISCSI_PARAM_CONN_ADDRESS:
+- spin_lock_bh(&conn->session->lock);
+- len = sprintf(buf, "%s\n", conn->portal_address);
+- spin_unlock_bh(&conn->session->lock);
++ mutex_lock(&conn->xmitmutex);
++ if (!tcp_conn->sock) {
++ mutex_unlock(&conn->xmitmutex);
++ return -EINVAL;
++ }
++
++ sk = tcp_conn->sock->sk;
++ if (sk->sk_family == PF_INET) {
++ inet = inet_sk(sk);
++ len = sprintf(buf, NIPQUAD_FMT "\n",
++ NIPQUAD(inet->daddr));
++ } else {
++ np = inet6_sk(sk);
++ len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
++ }
++ mutex_unlock(&conn->xmitmutex);
+ break;
+ default:
+ return iscsi_conn_get_param(cls_conn, param, buf);
+@@ -1843,93 +2094,65 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+ }
+
+ static struct iscsi_cls_session *
+-iscsi_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
+- uint16_t qdepth, uint32_t initial_cmdsn,
+- uint32_t *hostno)
++iscsi_tcp_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
++ uint32_t hn;
+ int cmd_i;
+
+- if (ep) {
+- printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
+- return NULL;
+- }
+-
+- shost = iscsi_host_alloc(&iscsi_sht, 0, qdepth);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_tcp_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
+-
+- if (iscsi_host_add(shost, NULL))
+- goto free_host;
+- *hostno = shost->host_no;
+-
+- cls_session = iscsi_session_setup(&iscsi_tcp_transport, shost, cmds_max,
+- sizeof(struct iscsi_tcp_task),
+- initial_cmdsn, 0);
++ cls_session = iscsi_session_setup(iscsit, scsit,
++ sizeof(struct iscsi_tcp_cmd_task),
++ sizeof(struct iscsi_tcp_mgmt_task),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++ *hostno = hn;
+
+- shost->can_queue = session->scsi_cmds_max;
++ session = class_to_transport_session(cls_session);
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
+- struct iscsi_tcp_task *tcp_task = task->dd_data;
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
++ struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+
+- task->hdr = &tcp_task->hdr.cmd_hdr;
+- task->hdr_max = sizeof(tcp_task->hdr) - ISCSI_DIGEST_SIZE;
++ ctask->hdr = &tcp_ctask->hdr;
+ }
+
+- if (iscsi_r2tpool_alloc(session))
+- goto remove_session;
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++ struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
++
++ mtask->hdr = &tcp_mtask->hdr;
++ }
++
++ if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
++ goto r2tpool_alloc_fail;
++
+ return cls_session;
+
+-remove_session:
++r2tpool_alloc_fail:
+ iscsi_session_teardown(cls_session);
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+ return NULL;
+ }
+
+ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ {
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_r2tpool_free(cls_session->dd_data);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
+-
+-static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
+-{
+- blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_ANY);
+- blk_queue_dma_alignment(sdev->request_queue, 0);
+- return 0;
++ iscsi_r2tpool_free(class_to_transport_session(cls_session));
++ iscsi_session_teardown(cls_session);
+ }
+
+ static struct scsi_host_template iscsi_sht = {
+- .module = THIS_MODULE,
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+- .sg_tablesize = 4096,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
++ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+ .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+- .slave_configure = iscsi_tcp_slave_configure,
+ .proc_name = "iscsi_tcp",
+ .this_id = -1,
+ };
+@@ -1956,16 +2179,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_LU_RESET_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+- .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
+- ISCSI_HOST_INITIATOR_NAME |
+- ISCSI_HOST_NETDEV_NAME,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
++ .host_template = &iscsi_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_conn = 1,
++ .max_cmd_len = ISCSI_TCP_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_tcp_session_create,
+ .destroy_session = iscsi_tcp_session_destroy,
+@@ -1978,15 +2197,14 @@ static struct iscsi_transport iscsi_tcp_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_conn_start,
+ .stop_conn = iscsi_tcp_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_conn_get_stats,
+- .init_task = iscsi_tcp_task_init,
+- .xmit_task = iscsi_tcp_task_xmit,
+- .cleanup_task = iscsi_tcp_cleanup_task,
++ .init_cmd_task = iscsi_tcp_cmd_init,
++ .init_mgmt_task = iscsi_tcp_mgmt_init,
++ .xmit_cmd_task = iscsi_tcp_ctask_xmit,
++ .xmit_mgmt_task = iscsi_tcp_mtask_xmit,
++ .cleanup_cmd_task = iscsi_tcp_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+ };
+@@ -1999,10 +2217,9 @@ iscsi_tcp_init(void)
+ iscsi_max_lun);
+ return -EINVAL;
+ }
++ iscsi_tcp_transport.max_lun = iscsi_max_lun;
+
+- iscsi_tcp_scsi_transport = iscsi_register_transport(
+- &iscsi_tcp_transport);
+- if (!iscsi_tcp_scsi_transport)
++ if (!iscsi_register_transport(&iscsi_tcp_transport))
+ return -ENODEV;
+
+ return 0;
+diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
+index 498d8ca..3273683 100644
+--- a/drivers/scsi/iscsi_tcp.h
++++ b/drivers/scsi/iscsi_tcp.h
+@@ -24,61 +24,68 @@
+
+ #include <scsi/libiscsi.h>
+
++/* Socket's Receive state machine */
++#define IN_PROGRESS_WAIT_HEADER 0x0
++#define IN_PROGRESS_HEADER_GATHER 0x1
++#define IN_PROGRESS_DATA_RECV 0x2
++#define IN_PROGRESS_DDIGEST_RECV 0x3
++
++/* xmit state machine */
++#define XMSTATE_IDLE 0x0
++#define XMSTATE_R_HDR 0x1
++#define XMSTATE_W_HDR 0x2
++#define XMSTATE_IMM_HDR 0x4
++#define XMSTATE_IMM_DATA 0x8
++#define XMSTATE_UNS_INIT 0x10
++#define XMSTATE_UNS_HDR 0x20
++#define XMSTATE_UNS_DATA 0x40
++#define XMSTATE_SOL_HDR 0x80
++#define XMSTATE_SOL_DATA 0x100
++#define XMSTATE_W_PAD 0x200
++#define XMSTATE_W_RESEND_PAD 0x400
++#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
++
++#define ISCSI_PAD_LEN 4
++#define ISCSI_SG_TABLESIZE SG_ALL
++#define ISCSI_TCP_MAX_CMD_LEN 16
++
+ struct crypto_hash;
+ struct socket;
+-struct iscsi_tcp_conn;
+-struct iscsi_segment;
+-
+-typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
+- struct iscsi_segment *);
+-
+-struct iscsi_segment {
+- unsigned char *data;
+- unsigned int size;
+- unsigned int copied;
+- unsigned int total_size;
+- unsigned int total_copied;
+-
+- struct hash_desc *hash;
+- unsigned char recv_digest[ISCSI_DIGEST_SIZE];
+- unsigned char digest[ISCSI_DIGEST_SIZE];
+- unsigned int digest_len;
+-
+- struct scatterlist *sg;
+- void *sg_mapped;
+- unsigned int sg_offset;
+-
+- iscsi_segment_done_fn_t *done;
+-};
+
+ /* Socket connection recieve helper */
+ struct iscsi_tcp_recv {
+ struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+-
+- /* Allocate buffer for BHS + AHS */
+- uint32_t hdr_buf[64];
++ struct sk_buff *skb;
++ int offset;
++ int len;
++ int hdr_offset;
++ int copy;
++ int copied;
++ int padding;
++ struct iscsi_cmd_task *ctask; /* current cmd in progress */
+
+ /* copied and flipped values */
+ int datalen;
+-};
+-
+-/* Socket connection send helper */
+-struct iscsi_tcp_send {
+- struct iscsi_hdr *hdr;
+- struct iscsi_segment segment;
+- struct iscsi_segment data_segment;
++ int datadgst;
++ char zero_copy_hdr;
+ };
+
+ struct iscsi_tcp_conn {
+ struct iscsi_conn *iscsi_conn;
+ struct socket *sock;
++ struct iscsi_hdr hdr; /* header placeholder */
++ char hdrext[4*sizeof(__u16) +
++ sizeof(__u32)];
++ int data_copied;
+ int stop_stage; /* conn_stop() flag: *
+ * stop to recover, *
+ * stop to terminate */
++ /* iSCSI connection-wide sequencing */
++ int hdr_size; /* PDU header size */
++
+ /* control data */
+ struct iscsi_tcp_recv in; /* TCP receive context */
+- struct iscsi_tcp_send out; /* TCP send context */
++ int in_progress; /* connection state machine */
+
+ /* old values for socket callbacks */
+ void (*old_data_ready)(struct sock *, int);
+@@ -93,14 +100,29 @@ struct iscsi_tcp_conn {
+ uint32_t sendpage_failures_cnt;
+ uint32_t discontiguous_hdr_cnt;
+
+- int error;
+-
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
+ };
+
++struct iscsi_buf {
++ struct scatterlist sg;
++ unsigned int sent;
++ char use_sendmsg;
++};
++
+ struct iscsi_data_task {
+ struct iscsi_data hdr; /* PDU */
+- char hdrext[ISCSI_DIGEST_SIZE];/* Header-Digest */
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ struct iscsi_buf digestbuf; /* digest buffer */
++ uint32_t digest; /* data digest */
++};
++
++struct iscsi_tcp_mgmt_task {
++ struct iscsi_hdr hdr;
++ char hdrext[sizeof(__u32)]; /* Header-Digest */
++ int xmstate; /* mgmt xmit progress */
++ struct iscsi_buf headbuf; /* header buffer */
++ struct iscsi_buf sendbuf; /* in progress buffer */
++ int sent;
+ };
+
+ struct iscsi_r2t_info {
+@@ -108,26 +130,38 @@ struct iscsi_r2t_info {
+ __be32 exp_statsn; /* copied from R2T */
+ uint32_t data_length; /* copied from R2T */
+ uint32_t data_offset; /* copied from R2T */
++ struct iscsi_buf headbuf; /* Data-Out Header Buffer */
++ struct iscsi_buf sendbuf; /* Data-Out in progress buffer*/
+ int sent; /* R2T sequence progress */
+ int data_count; /* DATA-Out payload progress */
++ struct scatterlist *sg; /* per-R2T SG list */
+ int solicit_datasn;
+- struct iscsi_data_task dtask; /* Data-Out header buf */
++ struct iscsi_data_task dtask; /* which data task */
+ };
+
+-struct iscsi_tcp_task {
+- struct iscsi_hdr_buff {
+- struct iscsi_cmd cmd_hdr;
+- char hdrextbuf[ISCSI_MAX_AHS_SIZE +
+- ISCSI_DIGEST_SIZE];
+- } hdr;
+-
++struct iscsi_tcp_cmd_task {
++ struct iscsi_cmd hdr;
++ char hdrext[4*sizeof(__u16)+ /* AHS */
++ sizeof(__u32)]; /* HeaderDigest */
++ char pad[ISCSI_PAD_LEN];
++ int pad_count; /* padded bytes */
++ struct iscsi_buf headbuf; /* header buf (xmit) */
++ struct iscsi_buf sendbuf; /* in progress buffer*/
++ int xmstate; /* xmit state machine */
+ int sent;
+- uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
++ struct scatterlist *sg; /* per-cmd SG list */
++ struct scatterlist *bad_sg; /* assert statement */
++ int sg_count; /* SG's to process */
++ uint32_t exp_r2tsn;
+ int data_offset;
+- struct iscsi_r2t_info *r2t; /* in progress R2T */
+- struct iscsi_pool r2tpool;
++ struct iscsi_r2t_info *r2t; /* in progress R2T */
++ struct iscsi_queue r2tpool;
+ struct kfifo *r2tqueue;
+- struct iscsi_data_task unsol_dtask; /* Data-Out header buf */
++ struct iscsi_r2t_info **r2ts;
++ int digest_count;
++ uint32_t immdigest; /* for imm data */
++ struct iscsi_buf immbuf; /* for imm data digest */
++ struct iscsi_data_task unsol_dtask; /* unsol data task */
+ };
+
+ #endif /* ISCSI_H */
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..3f5b9b4 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -22,9 +22,9 @@
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+ #include <linux/types.h>
++#include <linux/mutex.h>
+ #include <linux/kfifo.h>
+ #include <linux/delay.h>
+-#include <linux/log2.h>
+ #include <asm/unaligned.h>
+ #include <net/tcp.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -38,211 +38,92 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/libiscsi.h>
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-#define SNA32_CHECK 2147483648UL
+-
+-static int iscsi_sna_lt(u32 n1, u32 n2)
++struct iscsi_session *
++class_to_transport_session(struct iscsi_cls_session *cls_session)
+ {
+- return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ return iscsi_hostdata(shost->hostdata);
+ }
++EXPORT_SYMBOL_GPL(class_to_transport_session);
+
+-/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
+-static int iscsi_sna_lte(u32 n1, u32 n2)
+-{
+- return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
+- (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
+-}
++#define INVALID_SN_DELTA 0xffff
+
+-void
+-iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
++int
++iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+ {
+ uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
+ uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
+
+- /*
+- * standard specifies this check for when to update expected and
+- * max sequence numbers
+- */
+- if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
+- return;
+-
+- if (exp_cmdsn != session->exp_cmdsn &&
+- !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
++ if (max_cmdsn < exp_cmdsn -1 &&
++ max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
++ return ISCSI_ERR_MAX_CMDSN;
++ if (max_cmdsn > session->max_cmdsn ||
++ max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
++ session->max_cmdsn = max_cmdsn;
++ if (exp_cmdsn > session->exp_cmdsn ||
++ exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
+ session->exp_cmdsn = exp_cmdsn;
+
+- if (max_cmdsn != session->max_cmdsn &&
+- !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
+- session->max_cmdsn = max_cmdsn;
+- /*
+- * if the window closed with IO queued, then kick the
+- * xmit thread
+- */
+- if (!list_empty(&session->leadconn->xmitqueue) ||
+- !list_empty(&session->leadconn->mgmtqueue)) {
+- if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(session->host,
+- &session->leadconn->xmitwork);
+- }
+- }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
++EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn);
+
+-void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *task,
++void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+
+ memset(hdr, 0, sizeof(struct iscsi_data));
+ hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
+- hdr->datasn = cpu_to_be32(task->unsol_datasn);
+- task->unsol_datasn++;
++ hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
++ ctask->unsol_datasn++;
+ hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
+
+- hdr->itt = task->hdr->itt;
++ hdr->itt = ctask->hdr->itt;
+ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- hdr->offset = cpu_to_be32(task->unsol_offset);
++ hdr->offset = cpu_to_be32(ctask->unsol_offset);
+
+- if (task->unsol_count > conn->max_xmit_dlength) {
++ if (ctask->unsol_count > conn->max_xmit_dlength) {
+ hton24(hdr->dlength, conn->max_xmit_dlength);
+- task->data_count = conn->max_xmit_dlength;
+- task->unsol_offset += task->data_count;
++ ctask->data_count = conn->max_xmit_dlength;
++ ctask->unsol_offset += ctask->data_count;
+ hdr->flags = 0;
+ } else {
+- hton24(hdr->dlength, task->unsol_count);
+- task->data_count = task->unsol_count;
++ hton24(hdr->dlength, ctask->unsol_count);
++ ctask->data_count = ctask->unsol_count;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
+
+-static int iscsi_add_hdr(struct iscsi_task *task, unsigned len)
+-{
+- unsigned exp_len = task->hdr_len + len;
+-
+- if (exp_len > task->hdr_max) {
+- WARN_ON(1);
+- return -EINVAL;
+- }
+-
+- WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
+- task->hdr_len = exp_len;
+- return 0;
+-}
+-
+-/*
+- * make an extended cdb AHS
+- */
+-static int iscsi_prep_ecdb_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *cmd = task->sc;
+- unsigned rlen, pad_len;
+- unsigned short ahslength;
+- struct iscsi_ecdb_ahdr *ecdb_ahdr;
+- int rc;
+-
+- ecdb_ahdr = iscsi_next_hdr(task);
+- rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
+-
+- BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
+- ahslength = rlen + sizeof(ecdb_ahdr->reserved);
+-
+- pad_len = iscsi_padding(rlen);
+-
+- rc = iscsi_add_hdr(task, sizeof(ecdb_ahdr->ahslength) +
+- sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
+- if (rc)
+- return rc;
+-
+- if (pad_len)
+- memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
+-
+- ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
+- ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
+- ecdb_ahdr->reserved = 0;
+- memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
+-
+- debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
+- "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
+- cmd->cmd_len, rlen, pad_len, ahslength, task->hdr_len);
+-
+- return 0;
+-}
+-
+-static int iscsi_prep_bidi_ahs(struct iscsi_task *task)
+-{
+- struct scsi_cmnd *sc = task->sc;
+- struct iscsi_rlength_ahdr *rlen_ahdr;
+- int rc;
+-
+- rlen_ahdr = iscsi_next_hdr(task);
+- rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr));
+- if (rc)
+- return rc;
+-
+- rlen_ahdr->ahslength =
+- cpu_to_be16(sizeof(rlen_ahdr->read_length) +
+- sizeof(rlen_ahdr->reserved));
+- rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
+- rlen_ahdr->reserved = 0;
+- rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
+-
+- debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
+- "rlen_ahdr->ahslength(%d)\n",
+- be32_to_cpu(rlen_ahdr->read_length),
+- be16_to_cpu(rlen_ahdr->ahslength));
+- return 0;
+-}
+-
+ /**
+ * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
+- * @task: iscsi task
++ * @ctask: iscsi cmd task
+ *
+ * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
+ * fields like dlength or final based on how much data it sends
+ */
+-static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
++static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
+- unsigned hdrlength, cmd_len;
+- int rc;
+-
+- task->hdr_len = 0;
+- rc = iscsi_add_hdr(task, sizeof(*hdr));
+- if (rc)
+- return rc;
+- hdr->opcode = ISCSI_OP_SCSI_CMD;
+- hdr->flags = ISCSI_ATTR_SIMPLE;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->itt = build_itt(task->itt, session->age);
+- hdr->cmdsn = cpu_to_be32(session->cmdsn);
+- session->cmdsn++;
+- hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- cmd_len = sc->cmd_len;
+- if (cmd_len < ISCSI_CDB_SIZE)
+- memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
+- else if (cmd_len > ISCSI_CDB_SIZE) {
+- rc = iscsi_prep_ecdb_ahs(task);
+- if (rc)
+- return rc;
+- cmd_len = ISCSI_CDB_SIZE;
+- }
+- memcpy(hdr->cdb, sc->cmnd, cmd_len);
+-
+- task->imm_count = 0;
+- if (scsi_bidi_cmnd(sc)) {
+- hdr->flags |= ISCSI_FLAG_CMD_READ;
+- rc = iscsi_prep_bidi_ahs(task);
+- if (rc)
+- return rc;
+- }
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ hdr->opcode = ISCSI_OP_SCSI_CMD;
++ hdr->flags = ISCSI_ATTR_SIMPLE;
++ int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
++ hdr->itt = build_itt(ctask->itt, conn->id, session->age);
++ hdr->data_length = cpu_to_be32(sc->request_bufflen);
++ hdr->cmdsn = cpu_to_be32(session->cmdsn);
++ session->cmdsn++;
++ hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
++ memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
++
++ ctask->data_count = 0;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- unsigned out_len = scsi_out(sc)->length;
+- hdr->data_length = cpu_to_be32(out_len);
+ hdr->flags |= ISCSI_FLAG_CMD_WRITE;
+ /*
+ * Write counters:
+@@ -258,318 +139,117 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
+ *
+ * pad_count bytes to be sent as zero-padding
+ */
+- task->unsol_count = 0;
+- task->unsol_offset = 0;
+- task->unsol_datasn = 0;
++ ctask->imm_count = 0;
++ ctask->unsol_count = 0;
++ ctask->unsol_offset = 0;
++ ctask->unsol_datasn = 0;
+
+ if (session->imm_data_en) {
+- if (out_len >= session->first_burst)
+- task->imm_count = min(session->first_burst,
++ if (ctask->total_length >= session->first_burst)
++ ctask->imm_count = min(session->first_burst,
+ conn->max_xmit_dlength);
+ else
+- task->imm_count = min(out_len,
++ ctask->imm_count = min(ctask->total_length,
+ conn->max_xmit_dlength);
+- hton24(hdr->dlength, task->imm_count);
++ hton24(ctask->hdr->dlength, ctask->imm_count);
+ } else
+- zero_data(hdr->dlength);
++ zero_data(ctask->hdr->dlength);
+
+ if (!session->initial_r2t_en) {
+- task->unsol_count = min(session->first_burst, out_len)
+- - task->imm_count;
+- task->unsol_offset = task->imm_count;
++ ctask->unsol_count = min(session->first_burst,
++ ctask->total_length) - ctask->imm_count;
++ ctask->unsol_offset = ctask->imm_count;
+ }
+
+- if (!task->unsol_count)
++ if (!ctask->unsol_count)
+ /* No unsolicit Data-Out's */
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ } else {
++ ctask->datasn = 0;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ zero_data(hdr->dlength);
+- hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
+
+ if (sc->sc_data_direction == DMA_FROM_DEVICE)
+ hdr->flags |= ISCSI_FLAG_CMD_READ;
+ }
+
+- /* calculate size of additional header segments (AHSs) */
+- hdrlength = task->hdr_len - sizeof(*hdr);
+-
+- WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
+- hdrlength /= ISCSI_PAD_LEN;
+-
+- WARN_ON(hdrlength >= 256);
+- hdr->hlength = hdrlength & 0xFF;
+-
+- if (conn->session->tt->init_task &&
+- conn->session->tt->init_task(task))
+- return -EIO;
+-
+- task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(&task->running, &conn->run_list);
+-
+ conn->scsicmd_pdus_cnt++;
+- debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
+- "bidi_len %d cmdsn %d win %d]\n", scsi_bidi_cmnd(sc) ?
+- "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ?
+- "write" : "read", conn->id, sc, sc->cmnd[0], task->itt,
+- scsi_bufflen(sc),
+- scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
+- session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+- return 0;
+ }
++EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
+
+ /**
+- * iscsi_complete_command - finish a task
+- * @task: iscsi cmd task
++ * iscsi_complete_command - return command back to scsi-ml
++ * @ctask: iscsi cmd task
+ *
+ * Must be called with session lock.
+- * This function returns the scsi command to scsi-ml or cleans
+- * up mgmt tasks then returns the task to the pool.
++ * This function returns the scsi command to scsi-ml and returns
++ * the cmd task to the pool of available cmd tasks.
+ */
+-static void iscsi_complete_command(struct iscsi_task *task)
++static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
+-
+- list_del_init(&task->running);
+- task->state = ISCSI_TASK_COMPLETED;
+- task->sc = NULL;
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /*
+- * login task is preallocated so do not free
+- */
+- if (conn->login_task == task)
+- return;
++ struct iscsi_session *session = ctask->conn->session;
++ struct scsi_cmnd *sc = ctask->sc;
+
+- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+-
+- if (conn->ping_task == task)
+- conn->ping_task = NULL;
+-
+- if (sc) {
+- task->sc = NULL;
+- /* SCSI eh reuses commands to verify us */
+- sc->SCp.ptr = NULL;
+- /*
+- * queue command may call this to free the task, but
+- * not have setup the sc callback
+- */
+- if (sc->scsi_done)
+- sc->scsi_done(sc);
+- }
++ ctask->state = ISCSI_TASK_COMPLETED;
++ ctask->sc = NULL;
++ /* SCSI eh reuses commands to verify us */
++ sc->SCp.ptr = NULL;
++ list_del_init(&ctask->running);
++ __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
++ sc->scsi_done(sc);
+ }
+
+-void __iscsi_get_task(struct iscsi_task *task)
++static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- atomic_inc(&task->refcount);
++ atomic_inc(&ctask->refcount);
+ }
+-EXPORT_SYMBOL_GPL(__iscsi_get_task);
+
+-static void __iscsi_put_task(struct iscsi_task *task)
++static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
+ {
+- if (atomic_dec_and_test(&task->refcount))
+- iscsi_complete_command(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_get_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-void iscsi_put_task(struct iscsi_task *task)
++static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_session *session = task->conn->session;
+-
+- spin_lock_bh(&session->lock);
+- __iscsi_put_task(task);
+- spin_unlock_bh(&session->lock);
++ if (atomic_dec_and_test(&ctask->refcount))
++ iscsi_complete_command(ctask);
+ }
+-EXPORT_SYMBOL_GPL(iscsi_put_task);
+
+-/*
+- * session lock must be held
+- */
+-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
+- int err)
++static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
+ {
+- struct scsi_cmnd *sc;
+-
+- sc = task->sc;
+- if (!sc)
+- return;
+-
+- if (task->state == ISCSI_TASK_PENDING)
+- /*
+- * cmd never made it to the xmit thread, so we should not count
+- * the cmd in the sequencing
+- */
+- conn->session->queued_cmdsn--;
+- else
+- conn->session->tt->cleanup_task(conn, task);
+- /*
+- * Check if cleanup_task dropped the lock and the command completed,
+- */
+- if (!task->sc)
+- return;
+-
+- sc->result = err;
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+-
+- if (conn->task == task)
+- conn->task = NULL;
+- /* release ref from queuecommand */
+- __iscsi_put_task(task);
++ spin_lock_bh(&ctask->conn->session->lock);
++ __iscsi_put_ctask(ctask);
++ spin_unlock_bh(&ctask->conn->session->lock);
+ }
+
+-static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
+- struct iscsi_task *task)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_hdr *hdr = (struct iscsi_hdr *)task->hdr;
+- struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
+-
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- return -ENOTCONN;
+-
+- if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
+- hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
+- /*
+- * pre-format CmdSN for outgoing PDU.
+- */
+- nop->cmdsn = cpu_to_be32(session->cmdsn);
+- if (hdr->itt != RESERVED_ITT) {
+- hdr->itt = build_itt(task->itt, session->age);
+- /*
+- * TODO: We always use immediate, so we never hit this.
+- * If we start to send tmfs or nops as non-immediate then
+- * we should start checking the cmdsn numbers for mgmt tasks.
+- */
+- if (conn->c_stage == ISCSI_CONN_STARTED &&
+- !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+- session->queued_cmdsn++;
+- session->cmdsn++;
+- }
+- }
+-
+- if (session->tt->init_task)
+- session->tt->init_task(task);
+-
+- if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+- session->state = ISCSI_STATE_LOGGING_OUT;
+-
+- list_move_tail(&task->running, &conn->mgmt_run_list);
+- debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
+- hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
+- task->data_count);
+- return 0;
+-}
+-
+-static struct iscsi_task *
+-__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
+-
+- if (session->state == ISCSI_STATE_TERMINATE)
+- return NULL;
+-
+- if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
+- hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
+- /*
+- * Login and Text are sent serially, in
+- * request-followed-by-response sequence.
+- * Same task can be used. Same ITT must be used.
+- * Note that login_task is preallocated at conn_create().
+- */
+- task = conn->login_task;
+- else {
+- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
+- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
+-
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&task, sizeof(void*)))
+- return NULL;
+-
+- if ((hdr->opcode == (ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE)) &&
+- hdr->ttt == RESERVED_ITT) {
+- conn->ping_task = task;
+- conn->last_ping = jiffies;
+- }
+- }
+- /*
+- * released in complete pdu for task we expect a response for, and
+- * released by the lld when it has transmitted the task for
+- * pdus we do not expect a response for.
+- */
+- atomic_set(&task->refcount, 1);
+- task->conn = conn;
+- task->sc = NULL;
+-
+- if (data_size) {
+- memcpy(task->data, data, data_size);
+- task->data_count = data_size;
+- } else
+- task->data_count = 0;
+-
+- memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->mgmtqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_mgmt_task(conn, task)) {
+- __iscsi_put_task(task);
+- return NULL;
+- }
+-
+- if (session->tt->xmit_task(task))
+- task = NULL;
+-
+- } else
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-
+- return task;
+-}
+-
+-int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
+- char *data, uint32_t data_size)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_session *session = conn->session;
+- int err = 0;
+-
+- spin_lock_bh(&session->lock);
+- if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
+- err = -EPERM;
+- spin_unlock_bh(&session->lock);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
+-
+ /**
+ * iscsi_cmd_rsp - SCSI Command Response processing
+ * @conn: iscsi connection
+ * @hdr: iscsi header
+- * @task: scsi command task
++ * @ctask: scsi command task
+ * @data: cmd data buffer
+ * @datalen: len of buffer
+ *
+ * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
+ * then completes the command and task.
+ **/
+-static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+- struct iscsi_task *task, char *data,
+- int datalen)
++static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ struct iscsi_cmd_task *ctask, char *data,
++ int datalen)
+ {
++ int rc;
+ struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
+ struct iscsi_session *session = conn->session;
+- struct scsi_cmnd *sc = task->sc;
++ struct scsi_cmnd *sc = ctask->sc;
++
++ rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
++ if (rc) {
++ sc->result = DID_ERROR << 16;
++ goto out;
++ }
+
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
+ conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
+
+ sc->result = (DID_OK << 16) | rhdr->cmd_status;
+@@ -584,14 +264,13 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+
+ if (datalen < 2) {
+ invalid_datalen:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "Got CHECK_CONDITION but invalid data "
+- "buffer size of %d\n", datalen);
++ printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
++ "invalid data buffer size of %d\n", datalen);
+ sc->result = DID_BAD_TARGET << 16;
+ goto out;
+ }
+
+- senselen = get_unaligned_be16(data);
++ senselen = be16_to_cpu(get_unaligned((__be16 *) data));
+ if (datalen < senselen)
+ goto invalid_datalen;
+
+@@ -601,36 +280,28 @@ invalid_datalen:
+ min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
+ }
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
+- ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
+- int res_count = be32_to_cpu(rhdr->bi_residual_count);
+-
+- if (scsi_bidi_cmnd(sc) && res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
+- res_count <= scsi_in(sc)->length))
+- scsi_in(sc)->resid = res_count;
+- else
+- sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ if (sc->sc_data_direction == DMA_TO_DEVICE)
++ goto out;
+
+- if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
+- ISCSI_FLAG_CMD_OVERFLOW)) {
++ if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
+ int res_count = be32_to_cpu(rhdr->residual_count);
+
+- if (res_count > 0 &&
+- (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
+- res_count <= scsi_bufflen(sc)))
+- /* write side for bidi or uni-io set_resid */
+- scsi_set_resid(sc, res_count);
++ if (res_count > 0 && res_count <= sc->request_bufflen)
++ sc->resid = res_count;
+ else
+ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
+- }
++ } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
++ sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
++ else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
++ sc->resid = be32_to_cpu(rhdr->residual_count);
++
+ out:
+ debug_scsi("done [sc %lx res %d itt 0x%x]\n",
+- (long)sc, sc->result, task->itt);
++ (long)sc, sc->result, ctask->itt);
+ conn->scsirsp_pdus_cnt++;
+
+- __iscsi_put_task(task);
++ __iscsi_put_ctask(ctask);
++ return rc;
+ }
+
+ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+@@ -640,42 +311,18 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+- if (conn->tmf_state != TMF_QUEUED)
++ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+- conn->tmf_state = TMF_SUCCESS;
++ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+- conn->tmf_state = TMF_NOT_FOUND;
++ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+- conn->tmf_state = TMF_FAILED;
++ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+ }
+
+-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+-{
+- struct iscsi_nopout hdr;
+- struct iscsi_task *task;
+-
+- if (!rhdr && conn->ping_task)
+- return;
+-
+- memset(&hdr, 0, sizeof(struct iscsi_nopout));
+- hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
+- hdr.flags = ISCSI_FLAG_CMD_FINAL;
+-
+- if (rhdr) {
+- memcpy(hdr.lun, rhdr->lun, 8);
+- hdr.ttt = rhdr->ttt;
+- hdr.itt = RESERVED_ITT;
+- } else
+- hdr.ttt = RESERVED_ITT;
+-
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
+- if (!task)
+- iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
+-}
+-
+ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
+ {
+@@ -692,41 +339,15 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
+ memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+ itt = get_itt(rejected_pdu.itt);
+- iscsi_conn_printk(KERN_ERR, conn,
+- "itt 0x%x had pdu (op 0x%x) rejected "
+- "due to DataDigest error.\n", itt,
+- rejected_pdu.opcode);
++ printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
++ "due to DataDigest error.\n", itt,
++ rejected_pdu.opcode);
+ }
+ }
+ return 0;
+ }
+
+ /**
+- * iscsi_itt_to_task - look up task by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for mgmt tasks like login and nops, or if
+- * the LDD's itt space does not include the session age.
+- *
+- * The session lock must be held.
+- */
+-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return NULL;
+-
+- i = get_itt(itt);
+- if (i >= session->cmds_max)
+- return NULL;
+-
+- return session->cmds[i];
+-}
+-
+-/**
+ * __iscsi_complete_pdu - complete pdu
+ * @conn: iscsi conn
+ * @hdr: iscsi header
+@@ -742,24 +363,105 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ {
+ struct iscsi_session *session = conn->session;
+ int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
+- struct iscsi_task *task;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
+ uint32_t itt;
+
+- conn->last_recv = jiffies;
+- rc = iscsi_verify_itt(conn, hdr->itt);
+- if (rc)
+- return rc;
+-
+ if (hdr->itt != RESERVED_ITT)
+ itt = get_itt(hdr->itt);
+ else
+ itt = ~0U;
+
+- debug_scsi("[op 0x%x cid %d itt 0x%x len %d]\n",
+- opcode, conn->id, itt, datalen);
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
++
++ debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, ctask->itt, datalen);
++
++ switch(opcode) {
++ case ISCSI_OP_SCSI_CMD_RSP:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
++ datalen);
++ break;
++ case ISCSI_OP_SCSI_DATA_IN:
++ BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
++ if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
++ conn->scsirsp_pdus_cnt++;
++ __iscsi_put_ctask(ctask);
++ }
++ break;
++ case ISCSI_OP_R2T:
++ /* LLD handles this for now */
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
++ itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
++ mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
++
++ debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
++ opcode, conn->id, mtask->itt, datalen);
+
+- if (itt == ~0U) {
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
++
++ switch(opcode) {
++ case ISCSI_OP_LOGOUT_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++ /* fall through */
++ case ISCSI_OP_LOGIN_RSP:
++ case ISCSI_OP_TEXT_RSP:
++ /*
++ * login related PDU's exp_statsn is handled in
++ * userspace
++ */
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ case ISCSI_OP_SCSI_TMFUNC_RSP:
++ if (datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++
++ iscsi_tmf_rsp(conn, hdr);
++ break;
++ case ISCSI_OP_NOOP_IN:
++ if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
++ rc = ISCSI_ERR_PROTO;
++ break;
++ }
++ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
++
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
++ rc = ISCSI_ERR_CONN_FAILED;
++ list_del(&mtask->running);
++ if (conn->login_mtask != mtask)
++ __kfifo_put(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*));
++ break;
++ default:
++ rc = ISCSI_ERR_BAD_OPCODE;
++ break;
++ }
++ } else if (itt == ~0U) {
++ rc = iscsi_check_assign_cmdsn(session,
++ (struct iscsi_nopin*)hdr);
++ if (rc)
++ goto done;
+
+ switch(opcode) {
+ case ISCSI_OP_NOOP_IN:
+@@ -771,7 +473,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
+ break;
+
+- iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
++ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
++ rc = ISCSI_ERR_CONN_FAILED;
+ break;
+ case ISCSI_OP_REJECT:
+ rc = iscsi_handle_reject(conn, hdr, data, datalen);
+@@ -785,101 +488,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ rc = ISCSI_ERR_BAD_OPCODE;
+ break;
+ }
+- goto out;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- case ISCSI_OP_SCSI_DATA_IN:
+- task = iscsi_itt_to_ctask(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- case ISCSI_OP_R2T:
+- /*
+- * LLD handles R2Ts if they need to.
+- */
+- return 0;
+- case ISCSI_OP_LOGOUT_RSP:
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- case ISCSI_OP_NOOP_IN:
+- task = iscsi_itt_to_task(conn, hdr->itt);
+- if (!task)
+- return ISCSI_ERR_BAD_ITT;
+- break;
+- default:
+- return ISCSI_ERR_BAD_OPCODE;
+- }
+-
+- switch(opcode) {
+- case ISCSI_OP_SCSI_CMD_RSP:
+- iscsi_scsi_cmd_rsp(conn, hdr, task, data, datalen);
+- break;
+- case ISCSI_OP_SCSI_DATA_IN:
+- if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+- conn->scsirsp_pdus_cnt++;
+- iscsi_update_cmdsn(session,
+- (struct iscsi_nopin*) hdr);
+- __iscsi_put_task(task);
+- }
+- break;
+- case ISCSI_OP_LOGOUT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+- goto recv_pdu;
+- case ISCSI_OP_LOGIN_RSP:
+- case ISCSI_OP_TEXT_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- /*
+- * login related PDU's exp_statsn is handled in
+- * userspace
+- */
+- goto recv_pdu;
+- case ISCSI_OP_SCSI_TMFUNC_RSP:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+-
+- iscsi_tmf_rsp(conn, hdr);
+- __iscsi_put_task(task);
+- break;
+- case ISCSI_OP_NOOP_IN:
+- iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
+- if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) {
+- rc = ISCSI_ERR_PROTO;
+- break;
+- }
+- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+-
+- if (conn->ping_task != task)
+- /*
+- * If this is not in response to one of our
+- * nops then it must be from userspace.
+- */
+- goto recv_pdu;
+-
+- mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+- __iscsi_put_task(task);
+- break;
+- default:
+- rc = ISCSI_ERR_BAD_OPCODE;
+- break;
+- }
++ } else
++ rc = ISCSI_ERR_BAD_ITT;
+
+-out:
+- return rc;
+-recv_pdu:
+- if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+- rc = ISCSI_ERR_CONN_FAILED;
+- __iscsi_put_task(task);
++done:
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
+@@ -896,63 +508,55 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
+
+-int iscsi_verify_itt(struct iscsi_conn *conn, itt_t itt)
++/* verify itt (itt encoding: age+cid+itt) */
++int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ uint32_t *ret_itt)
+ {
+ struct iscsi_session *session = conn->session;
+- uint32_t i;
+-
+- if (itt == RESERVED_ITT)
+- return 0;
+-
+- if (((__force u32)itt & ISCSI_AGE_MASK) !=
+- (session->age << ISCSI_AGE_SHIFT)) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received itt %x expected session age (%x)\n",
+- (__force u32)itt, session->age);
+- return ISCSI_ERR_BAD_ITT;
+- }
++ struct iscsi_cmd_task *ctask;
++ uint32_t itt;
+
+- i = get_itt(itt);
+- if (i >= session->cmds_max) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "received invalid itt index %u (max cmds "
+- "%u.\n", i, session->cmds_max);
+- return ISCSI_ERR_BAD_ITT;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_verify_itt);
++ if (hdr->itt != RESERVED_ITT) {
++ if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
++ (session->age << ISCSI_AGE_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x expected "
++ "session age (%x)\n", (__force u32)hdr->itt,
++ session->age & ISCSI_AGE_MASK);
++ return ISCSI_ERR_BAD_ITT;
++ }
+
+-/**
+- * iscsi_itt_to_ctask - look up ctask by itt
+- * @conn: iscsi connection
+- * @itt: itt
+- *
+- * This should be used for cmd tasks.
+- *
+- * The session lock must be held.
+- */
+-struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)
+-{
+- struct iscsi_task *task;
++ if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
++ (conn->id << ISCSI_CID_SHIFT)) {
++ printk(KERN_ERR "iscsi: received itt %x, expected "
++ "CID (%x)\n", (__force u32)hdr->itt, conn->id);
++ return ISCSI_ERR_BAD_ITT;
++ }
++ itt = get_itt(hdr->itt);
++ } else
++ itt = ~0U;
+
+- if (iscsi_verify_itt(conn, itt))
+- return NULL;
++ if (itt < session->cmds_max) {
++ ctask = session->cmds[itt];
+
+- task = iscsi_itt_to_task(conn, itt);
+- if (!task || !task->sc)
+- return NULL;
++ if (!ctask->sc) {
++ printk(KERN_INFO "iscsi: dropping ctask with "
++ "itt 0x%x\n", ctask->itt);
++ /* force drop */
++ return ISCSI_ERR_NO_SCSI_CMD;
++ }
+
+- if (task->sc->SCp.phase != conn->session->age) {
+- iscsi_session_printk(KERN_ERR, conn->session,
+- "task's session age %d, expected %d\n",
+- task->sc->SCp.phase, conn->session->age);
+- return NULL;
++ if (ctask->sc->SCp.phase != session->age) {
++ printk(KERN_ERR "iscsi: ctask's session age %d, "
++ "expected %d\n", ctask->sc->SCp.phase,
++ session->age);
++ return ISCSI_ERR_SESSION_FAILED;
++ }
+ }
+
+- return task;
++ *ret_itt = itt;
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_itt_to_ctask);
++EXPORT_SYMBOL_GPL(iscsi_verify_itt);
+
+ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ {
+@@ -974,55 +578,29 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_failure);
+
+-static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
++static int iscsi_xmit_mtask(struct iscsi_conn *conn)
+ {
+- struct iscsi_session *session = conn->session;
++ struct iscsi_hdr *hdr = conn->mtask->hdr;
++ int rc, was_logout = 0;
+
+- /*
+- * Check for iSCSI window and take care of CmdSN wrap-around
+- */
+- if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
+- debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
+- "CmdSN %u/%u\n", session->exp_cmdsn,
+- session->max_cmdsn, session->cmdsn,
+- session->queued_cmdsn);
+- return -ENOSPC;
++ if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
++ conn->session->state = ISCSI_STATE_IN_RECOVERY;
++ iscsi_block_session(session_to_cls(conn->session));
++ was_logout = 1;
+ }
+- return 0;
+-}
+-
+-static int iscsi_xmit_task(struct iscsi_conn *conn)
+-{
+- struct iscsi_task *task = conn->task;
+- int rc;
+-
+- __iscsi_get_task(task);
+- spin_unlock_bh(&conn->session->lock);
+- rc = conn->session->tt->xmit_task(task);
+- spin_lock_bh(&conn->session->lock);
+- __iscsi_put_task(task);
+- if (!rc)
+- /* done with this task */
+- conn->task = NULL;
+- return rc;
+-}
++ rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
++ if (rc)
++ return rc;
+
+-/**
+- * iscsi_requeue_task - requeue task to run from session workqueue
+- * @task: task to requeue
+- *
+- * LLDs that need to run a task from the session workqueue should call
+- * this. The session lock must be held. This should only be called
+- * by software drivers.
+- */
+-void iscsi_requeue_task(struct iscsi_task *task)
+-{
+- struct iscsi_conn *conn = task->conn;
++ /* done with this in-progress mtask */
++ conn->mtask = NULL;
+
+- list_move_tail(&task->running, &conn->requeue);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ if (was_logout) {
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ return -ENODATA;
++ }
++ return 0;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+
+ /**
+ * iscsi_data_xmit - xmit any command into the scheduled connection
+@@ -1035,94 +613,106 @@ EXPORT_SYMBOL_GPL(iscsi_requeue_task);
+ **/
+ static int iscsi_data_xmit(struct iscsi_conn *conn)
+ {
++ struct iscsi_transport *tt;
+ int rc = 0;
+
+- spin_lock_bh(&conn->session->lock);
+ if (unlikely(conn->suspend_tx)) {
+ debug_scsi("conn %d Tx suspended!\n", conn->id);
+- spin_unlock_bh(&conn->session->lock);
+ return -ENODATA;
+ }
+-
+- if (conn->task) {
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
++ tt = conn->session->tt;
+
+ /*
+- * process mgmt pdus like nops before commands since we should
+- * only have one nop-out as a ping from us and targets should not
+- * overflow us with nop-ins
++ * Transmit in the following order:
++ *
++ * 1) un-finished xmit (ctask or mtask)
++ * 2) immediate control PDUs
++ * 3) write data
++ * 4) SCSI commands
++ * 5) non-immediate control PDUs
++ *
++ * No need to lock around __kfifo_get as long as
++ * there's one producer and one consumer.
+ */
+-check_mgmt:
+- while (!list_empty(&conn->mgmtqueue)) {
+- conn->task = list_entry(conn->mgmtqueue.next,
+- struct iscsi_task, running);
+- if (iscsi_prep_mgmt_task(conn, conn->task)) {
+- __iscsi_put_task(conn->task);
+- conn->task = NULL;
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
+- if (rc)
+- goto again;
+- }
+
+- /* process pending command queue */
+- while (!list_empty(&conn->xmitqueue)) {
+- if (conn->tmf_state == TMF_QUEUED)
+- break;
++ BUG_ON(conn->ctask && conn->mtask);
+
+- conn->task = list_entry(conn->xmitqueue.next,
+- struct iscsi_task, running);
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
+- fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+- continue;
+- }
+- if (iscsi_prep_scsi_cmd_pdu(conn->task)) {
+- fail_command(conn, conn->task, DID_ABORT << 16);
+- continue;
+- }
+- rc = iscsi_xmit_task(conn);
++ if (conn->ctask) {
++ iscsi_get_ctask(conn->ctask);
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++ iscsi_put_ctask(conn->ctask);
+ if (rc)
+ goto again;
+- /*
+- * we could continuously get new task requests so
+- * we need to check the mgmt queue for nops that need to
+- * be sent to aviod starvation
+- */
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ /* done with this in-progress ctask */
++ conn->ctask = NULL;
++ }
++ if (conn->mtask) {
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
+ }
+
+- while (!list_empty(&conn->requeue)) {
+- if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
+- break;
++ /* process immediate first */
++ if (unlikely(__kfifo_len(conn->immqueue))) {
++ while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
+
++ /* process command queue */
++ spin_lock_bh(&conn->session->lock);
++ while (!list_empty(&conn->xmitqueue)) {
+ /*
+- * we always do fastlogout - conn stop code will clean up.
++ * iscsi tcp may readd the task to the xmitqueue to send
++ * write data
+ */
+- if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
+- break;
++ conn->ctask = list_entry(conn->xmitqueue.next,
++ struct iscsi_cmd_task, running);
++ conn->ctask->state = ISCSI_TASK_RUNNING;
++ list_move_tail(conn->xmitqueue.next, &conn->run_list);
++ __iscsi_get_ctask(conn->ctask);
++ spin_unlock_bh(&conn->session->lock);
+
+- conn->task = list_entry(conn->requeue.next,
+- struct iscsi_task, running);
+- conn->task->state = ISCSI_TASK_RUNNING;
+- list_move_tail(conn->requeue.next, &conn->run_list);
+- rc = iscsi_xmit_task(conn);
+- if (rc)
++ rc = tt->xmit_cmd_task(conn, conn->ctask);
++
++ spin_lock_bh(&conn->session->lock);
++ __iscsi_put_ctask(conn->ctask);
++ if (rc) {
++ spin_unlock_bh(&conn->session->lock);
+ goto again;
+- if (!list_empty(&conn->mgmtqueue))
+- goto check_mgmt;
++ }
+ }
+ spin_unlock_bh(&conn->session->lock);
++ /* done with this ctask */
++ conn->ctask = NULL;
++
++ /* process the rest control plane PDUs, if any */
++ if (unlikely(__kfifo_len(conn->mgmtqueue))) {
++ while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
++ sizeof(void*))) {
++ spin_lock_bh(&conn->session->lock);
++ list_add_tail(&conn->mtask->running,
++ &conn->mgmt_run_list);
++ spin_unlock_bh(&conn->session->lock);
++ rc = iscsi_xmit_mtask(conn);
++ if (rc)
++ goto again;
++ }
++ }
++
+ return -ENODATA;
+
+ again:
+ if (unlikely(conn->suspend_tx))
+- rc = -ENODATA;
+- spin_unlock_bh(&conn->session->lock);
++ return -ENODATA;
++
+ return rc;
+ }
+
+@@ -1134,9 +724,11 @@ static void iscsi_xmitworker(struct work_struct *work)
+ /*
+ * serialize Xmit worker on a per-connection basis.
+ */
++ mutex_lock(&conn->xmitmutex);
+ do {
+ rc = iscsi_data_xmit(conn);
+ } while (rc >= 0 || rc == -EAGAIN);
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ enum {
+@@ -1148,36 +740,25 @@ enum {
+ FAILURE_SESSION_TERMINATE,
+ FAILURE_SESSION_IN_RECOVERY,
+ FAILURE_SESSION_RECOVERY_TIMEOUT,
+- FAILURE_SESSION_LOGGING_OUT,
+- FAILURE_SESSION_NOT_READY,
+ };
+
+ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ {
+- struct iscsi_cls_session *cls_session;
+ struct Scsi_Host *host;
+ int reason = 0;
+ struct iscsi_session *session;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task = NULL;
++ struct iscsi_cmd_task *ctask = NULL;
+
+ sc->scsi_done = done;
+ sc->result = 0;
+ sc->SCp.ptr = NULL;
+
+ host = sc->device->host;
+- spin_unlock(host->host_lock);
++ session = iscsi_hostdata(host->hostdata);
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+ spin_lock(&session->lock);
+
+- reason = iscsi_session_chkready(cls_session);
+- if (reason) {
+- sc->result = reason;
+- goto fault;
+- }
+-
+ /*
+ * ISCSI_STATE_FAILED is a temp. state. The recovery
+ * code will decide what is best to do with command queued
+@@ -1191,95 +772,77 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ * be entering our queuecommand while a block is starting
+ * up because the block code is not locked)
+ */
+- switch (session->state) {
+- case ISCSI_STATE_IN_RECOVERY:
++ if (session->state == ISCSI_STATE_IN_RECOVERY) {
+ reason = FAILURE_SESSION_IN_RECOVERY;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_LOGGING_OUT:
+- reason = FAILURE_SESSION_LOGGING_OUT;
+- sc->result = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_STATE_RECOVERY_FAILED:
++ goto reject;
++ }
++
++ if (session->state == ISCSI_STATE_RECOVERY_FAILED)
+ reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- case ISCSI_STATE_TERMINATE:
++ else if (session->state == ISCSI_STATE_TERMINATE)
+ reason = FAILURE_SESSION_TERMINATE;
+- sc->result = DID_NO_CONNECT << 16;
+- break;
+- default:
++ else
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+- }
+ goto fault;
+ }
+
++ /*
++ * Check for iSCSI window and take care of CmdSN wrap-around
++ */
++ if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
++ reason = FAILURE_WINDOW_CLOSED;
++ goto reject;
++ }
++
+ conn = session->leadconn;
+ if (!conn) {
+ reason = FAILURE_SESSION_FREED;
+- sc->result = DID_NO_CONNECT << 16;
+ goto fault;
+ }
+
+- if (iscsi_check_cmdsn_window_closed(conn)) {
+- reason = FAILURE_WINDOW_CLOSED;
+- goto reject;
+- }
+-
+- if (!__kfifo_get(session->cmdpool.queue, (void*)&task,
++ if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
+ sizeof(void*))) {
+ reason = FAILURE_OOM;
+ goto reject;
+ }
+ sc->SCp.phase = session->age;
+- sc->SCp.ptr = (char *)task;
+-
+- atomic_set(&task->refcount, 1);
+- task->state = ISCSI_TASK_PENDING;
+- task->conn = conn;
+- task->sc = sc;
+- INIT_LIST_HEAD(&task->running);
+- list_add_tail(&task->running, &conn->xmitqueue);
+-
+- if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+- if (iscsi_prep_scsi_cmd_pdu(task)) {
+- sc->result = DID_ABORT << 16;
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- goto fault;
+- }
+- if (session->tt->xmit_task(task)) {
+- sc->scsi_done = NULL;
+- iscsi_complete_command(task);
+- reason = FAILURE_SESSION_NOT_READY;
+- goto reject;
+- }
+- } else
+- scsi_queue_work(session->host, &conn->xmitwork);
+-
+- session->queued_cmdsn++;
++ sc->SCp.ptr = (char *)ctask;
++
++ atomic_set(&ctask->refcount, 1);
++ ctask->state = ISCSI_TASK_PENDING;
++ ctask->mtask = NULL;
++ ctask->conn = conn;
++ ctask->sc = sc;
++ INIT_LIST_HEAD(&ctask->running);
++ ctask->total_length = sc->request_bufflen;
++ iscsi_prep_scsi_cmd_pdu(ctask);
++
++ session->tt->init_cmd_task(ctask);
++
++ list_add_tail(&ctask->running, &conn->xmitqueue);
++ debug_scsi(
++ "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
++ "win %d]\n",
++ sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
++ conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
++ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+- spin_lock(host->host_lock);
++
++ scsi_queue_work(host, &conn->xmitwork);
+ return 0;
+
+ reject:
+ spin_unlock(&session->lock);
+ debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
+- spin_lock(host->host_lock);
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ fault:
+ spin_unlock(&session->lock);
+- debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
+- if (!scsi_bidi_cmnd(sc))
+- scsi_set_resid(sc, scsi_bufflen(sc));
+- else {
+- scsi_out(sc)->resid = scsi_out(sc)->length;
+- scsi_in(sc)->resid = scsi_in(sc)->length;
+- }
+- done(sc);
+- spin_lock(host->host_lock);
++ printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
++ sc->cmnd[0], reason);
++ sc->result = (DID_NO_CONNECT << 16);
++ sc->resid = sc->request_bufflen;
++ sc->scsi_done(sc);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_queuecommand);
+@@ -1293,15 +856,106 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
+
++static int
++iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_session *session = conn->session;
++ struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
++ struct iscsi_mgmt_task *mtask;
++
++ spin_lock_bh(&session->lock);
++ if (session->state == ISCSI_STATE_TERMINATE) {
++ spin_unlock_bh(&session->lock);
++ return -EPERM;
++ }
++ if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
++ hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
++ /*
++ * Login and Text are sent serially, in
++ * request-followed-by-response sequence.
++ * Same mtask can be used. Same ITT must be used.
++ * Note that login_mtask is preallocated at conn_create().
++ */
++ mtask = conn->login_mtask;
++ else {
++ BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
++ BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
++
++ nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&mtask, sizeof(void*))) {
++ spin_unlock_bh(&session->lock);
++ return -ENOSPC;
++ }
++ }
++
++ /*
++ * pre-format CmdSN for outgoing PDU.
++ */
++ if (hdr->itt != RESERVED_ITT) {
++ hdr->itt = build_itt(mtask->itt, conn->id, session->age);
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++ if (conn->c_stage == ISCSI_CONN_STARTED &&
++ !(hdr->opcode & ISCSI_OP_IMMEDIATE))
++ session->cmdsn++;
++ } else
++ /* do not advance CmdSN */
++ nop->cmdsn = cpu_to_be32(session->cmdsn);
++
++ if (data_size) {
++ memcpy(mtask->data, data, data_size);
++ mtask->data_count = data_size;
++ } else
++ mtask->data_count = 0;
++
++ INIT_LIST_HEAD(&mtask->running);
++ memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
++ if (session->tt->init_mgmt_task)
++ session->tt->init_mgmt_task(conn, mtask, data, data_size);
++ spin_unlock_bh(&session->lock);
++
++ debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
++ hdr->opcode, hdr->itt, data_size);
++
++ /*
++ * since send_pdu() could be called at least from two contexts,
++ * we need to serialize __kfifo_put, so we don't have to take
++ * additional lock on fast data-path
++ */
++ if (hdr->opcode & ISCSI_OP_IMMEDIATE)
++ __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
++ else
++ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
++
++ scsi_queue_work(session->host, &conn->xmitwork);
++ return 0;
++}
++
++int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
++ char *data, uint32_t data_size)
++{
++ struct iscsi_conn *conn = cls_conn->dd_data;
++ int rc;
++
++ mutex_lock(&conn->xmitmutex);
++ rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
++ mutex_unlock(&conn->xmitmutex);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
++
+ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
++ struct iscsi_conn *conn = session->leadconn;
+
+ spin_lock_bh(&session->lock);
+ if (session->state != ISCSI_STATE_LOGGED_IN) {
+ session->state = ISCSI_STATE_RECOVERY_FAILED;
+- if (session->leadconn)
+- wake_up(&session->leadconn->ehwait);
++ if (conn)
++ wake_up(&conn->ehwait);
+ }
+ spin_unlock_bh(&session->lock);
+ }
+@@ -1309,32 +963,33 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
+
+ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
++ struct Scsi_Host *host = sc->device->host;
++ struct iscsi_session *session = iscsi_hostdata(host->hostdata);
++ struct iscsi_conn *conn = session->leadconn;
++ int fail_session = 0;
+
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+- conn = session->leadconn;
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return FAILED;
+ }
+
++ if (sc->SCp.phase == session->age) {
++ debug_scsi("failing connection CID %d due to SCSI host reset\n",
++ conn->id);
++ fail_session = 1;
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ /*
+ * we drop the lock here but the leadconn cannot be destoyed while
+ * we are in the scsi eh
+ */
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ if (fail_session)
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+
+ debug_scsi("iscsi_eh_host_reset wait for relogin\n");
+ wait_event_interruptible(conn->ehwait,
+@@ -1344,717 +999,472 @@ failed:
+ if (signal_pending(current))
+ flush_signals(current);
+
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_LOGGED_IN)
+- iscsi_session_printk(KERN_INFO, session,
+- "host reset succeeded\n");
++ printk(KERN_INFO "iscsi: host reset succeeded\n");
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
+ return SUCCESS;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
+
+-static void iscsi_tmf_timedout(unsigned long data)
++static void iscsi_tmabort_timedout(unsigned long data)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
++ struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&session->lock);
+- if (conn->tmf_state == TMF_QUEUED) {
+- conn->tmf_state = TMF_TIMEDOUT;
+- debug_scsi("tmf timedout\n");
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmabort_state = TMABORT_TIMEDOUT;
++ debug_scsi("tmabort timedout [sc %p itt 0x%x]\n",
++ ctask->sc, ctask->itt);
+ /* unblock eh_abort() */
+ wake_up(&conn->ehwait);
+ }
+ spin_unlock(&session->lock);
+ }
+
+-static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
+- struct iscsi_tm *hdr, int age,
+- int timeout)
++/* must be called with the mutex lock */
++static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
++ struct iscsi_cmd_task *ctask)
+ {
++ struct iscsi_conn *conn = ctask->conn;
+ struct iscsi_session *session = conn->session;
+- struct iscsi_task *task;
++ struct iscsi_tm *hdr = &conn->tmhdr;
++ int rc;
+
+- task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
+- NULL, 0);
+- if (!task) {
+- spin_unlock_bh(&session->lock);
++ /*
++	 * ctask timed out but session is OK; requests must be serialized.
++ */
++ memset(hdr, 0, sizeof(struct iscsi_tm));
++ hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
++ hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
++ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
++ memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
++ hdr->rtt = ctask->hdr->itt;
++ hdr->refcmdsn = ctask->hdr->cmdsn;
++
++ rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
++ NULL, 0);
++ if (rc) {
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- spin_lock_bh(&session->lock);
+- debug_scsi("tmf exec failure\n");
+- return -EPERM;
++ debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt,
++ rc);
++ return rc;
+ }
+- conn->tmfcmd_pdus_cnt++;
+- conn->tmf_timer.expires = timeout * HZ + jiffies;
+- conn->tmf_timer.function = iscsi_tmf_timedout;
+- conn->tmf_timer.data = (unsigned long)conn;
+- add_timer(&conn->tmf_timer);
+- debug_scsi("tmf set timeout\n");
+
++ debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
++
++ spin_lock_bh(&session->lock);
++ ctask->mtask = (struct iscsi_mgmt_task *)
++ session->mgmt_cmds[get_itt(hdr->itt) -
++ ISCSI_MGMT_ITT_OFFSET];
++
++ if (conn->tmabort_state == TMABORT_INITIAL) {
++ conn->tmfcmd_pdus_cnt++;
++ conn->tmabort_timer.expires = 10*HZ + jiffies;
++ conn->tmabort_timer.function = iscsi_tmabort_timedout;
++ conn->tmabort_timer.data = (unsigned long)ctask;
++ add_timer(&conn->tmabort_timer);
++ debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
++ }
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++ mutex_unlock(&conn->xmitmutex);
+
+ /*
+ * block eh thread until:
+ *
+- * 1) tmf response
+- * 2) tmf timeout
++ * 1) abort response
++ * 2) abort timeout
+ * 3) session is terminated or restarted or userspace has
+ * given up on recovery
+ */
+- wait_event_interruptible(conn->ehwait, age != session->age ||
++ wait_event_interruptible(conn->ehwait,
++ sc->SCp.phase != session->age ||
+ session->state != ISCSI_STATE_LOGGED_IN ||
+- conn->tmf_state != TMF_QUEUED);
++ conn->tmabort_state != TMABORT_INITIAL);
+ if (signal_pending(current))
+ flush_signals(current);
+- del_timer_sync(&conn->tmf_timer);
++ del_timer_sync(&conn->tmabort_timer);
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+- /* if the session drops it will clean up the task */
+- if (age != session->age ||
+- session->state != ISCSI_STATE_LOGGED_IN)
+- return -ENOTCONN;
++ mutex_lock(&conn->xmitmutex);
+ return 0;
+ }
+
+ /*
+- * Fail commands. session lock held and recv side suspended and xmit
+- * thread flushed
++ * xmit mutex and session lock must be held
+ */
+-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+- int error)
++static struct iscsi_mgmt_task *
++iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+ {
+- struct iscsi_task *task, *tmp;
++ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
++ struct iscsi_mgmt_task *task;
+
+- if (conn->task && (conn->task->sc->device->lun == lun || lun == -1))
+- conn->task = NULL;
++ debug_scsi("searching %d tasks\n", nr_tasks);
+
+- /* flush pending */
+- list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing pending sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
+- }
+- }
++ for (i = 0; i < nr_tasks; i++) {
++ __kfifo_get(fifo, (void*)&task, sizeof(void*));
++ debug_scsi("check task %u\n", task->itt);
+
+- list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing requeued sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, error << 16);
++ if (task->itt == itt) {
++ debug_scsi("matched task\n");
++ return task;
+ }
+- }
+
+- /* fail all other running */
+- list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
+- if (lun == task->sc->device->lun || lun == -1) {
+- debug_scsi("failing in progress sc %p itt 0x%x\n",
+- task->sc, task->itt);
+- fail_command(conn, task, DID_BUS_BUSY << 16);
+- }
++ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
++ return NULL;
+ }
+
+-void iscsi_suspend_tx(struct iscsi_conn *conn)
+-{
+- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_flush_work(conn->session->host);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
+-
+-static void iscsi_start_tx(struct iscsi_conn *conn)
+-{
+- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+- if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
+-}
+-
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+-
+- cls_session = starget_to_session(scsi_target(scmd->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("scsi cmd %p timedout\n", scmd);
+-
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN) {
+- /*
+- * We are probably in the middle of iscsi recovery so let
+- * that complete and handle the error.
+- */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ struct iscsi_conn *conn = ctask->conn;
++ struct iscsi_session *session = conn->session;
+
+- conn = session->leadconn;
+- if (!conn) {
+- /* In the middle of shuting down */
+- rc = EH_RESET_TIMER;
+- goto done;
+- }
++ if (!ctask->mtask)
++ return -EINVAL;
+
+- if (!conn->recv_timeout && !conn->ping_timeout)
+- goto done;
+- /*
+- * if the ping timedout then we are in the middle of cleaning up
+- * and can let the iscsi eh handle it
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+- (conn->ping_timeout * HZ), jiffies))
+- rc = EH_RESET_TIMER;
+- /*
+- * if we are about to check the transport then give the command
+- * more time
+- */
+- if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+- jiffies))
+- rc = EH_RESET_TIMER;
+- /* if in the middle of checking the transport then give us more time */
+- if (conn->ping_task)
+- rc = EH_RESET_TIMER;
+-done:
+- spin_unlock(&session->lock);
+- debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+- return rc;
++ if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt))
++ list_del(&ctask->mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
++ sizeof(void*));
++ ctask->mtask = NULL;
++ return 0;
+ }
+
+-static void iscsi_check_transport_timeouts(unsigned long data)
++/*
++ * session lock and xmitmutex must be held
++ */
++static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
++ int err)
+ {
+- struct iscsi_conn *conn = (struct iscsi_conn *)data;
+- struct iscsi_session *session = conn->session;
+- unsigned long recv_timeout, next_timeout = 0, last_recv;
++ struct scsi_cmnd *sc;
+
+- spin_lock(&session->lock);
+- if (session->state != ISCSI_STATE_LOGGED_IN)
+- goto done;
+-
+- recv_timeout = conn->recv_timeout;
+- if (!recv_timeout)
+- goto done;
+-
+- recv_timeout *= HZ;
+- last_recv = conn->last_recv;
+- if (conn->ping_task &&
+- time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
+- jiffies)) {
+- iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+- "expired, last rx %lu, last ping %lu, "
+- "now %lu\n", conn->ping_timeout, last_recv,
+- conn->last_ping, jiffies);
+- spin_unlock(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ sc = ctask->sc;
++ if (!sc)
+ return;
+- }
+
+- if (time_before_eq(last_recv + recv_timeout, jiffies)) {
+- /* send a ping to try to provoke some traffic */
+- debug_scsi("Sending nopout as ping on conn %p\n", conn);
+- iscsi_send_nopout(conn, NULL);
+- next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+- } else
+- next_timeout = last_recv + recv_timeout;
++ conn->session->tt->cleanup_cmd_task(conn, ctask);
++ iscsi_ctask_mtask_cleanup(ctask);
+
+- debug_scsi("Setting next tmo %lu\n", next_timeout);
+- mod_timer(&conn->transport_timer, next_timeout);
+-done:
+- spin_unlock(&session->lock);
+-}
+-
+-static void iscsi_prep_abort_task_pdu(struct iscsi_task *task,
+- struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- memcpy(hdr->lun, task->hdr->lun, sizeof(hdr->lun));
+- hdr->rtt = task->hdr->itt;
+- hdr->refcmdsn = task->hdr->cmdsn;
++ sc->result = err;
++ sc->resid = sc->request_bufflen;
++ /* release ref from queuecommand */
++ __iscsi_put_ctask(ctask);
+ }
+
+ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ {
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
++ struct iscsi_cmd_task *ctask;
+ struct iscsi_conn *conn;
+- struct iscsi_task *task;
+- struct iscsi_tm *hdr;
+- int rc, age;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
++ struct iscsi_session *session;
++ int rc;
+
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return SUCCESS;
+ }
+
++ ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
++ conn = ctask->conn;
++ session = conn->session;
++
++ conn->eh_abort_cnt++;
++ debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
++
++ mutex_lock(&conn->xmitmutex);
++ spin_lock_bh(&session->lock);
++
+ /*
+ * If we are not logged in or we have started a new session
+ * then let the host reset code handle this
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
+- sc->SCp.phase != session->age) {
+- spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+- }
+-
+- conn = session->leadconn;
+- conn->eh_abort_cnt++;
+- age = session->age;
+-
+- task = (struct iscsi_task *)sc->SCp.ptr;
+- debug_scsi("aborting [sc %p itt 0x%x]\n", sc, task->itt);
++ if (session->state != ISCSI_STATE_LOGGED_IN ||
++ sc->SCp.phase != session->age)
++ goto failed;
+
+- /* task completed before time out */
+- if (!task->sc) {
++ /* ctask completed before time out */
++ if (!ctask->sc) {
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+
+- if (task->state == ISCSI_TASK_PENDING) {
+- fail_command(conn, task, DID_ABORT << 16);
+- goto success;
++ /* what should we do here? */
++ if (conn->ctask == ctask) {
++ printk(KERN_INFO "iscsi: sc %p itt 0x%x partially sent. "
++ "Failing abort\n", sc, ctask->itt);
++ goto failed;
+ }
+
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto failed;
+- conn->tmf_state = TMF_QUEUED;
++ if (ctask->state == ISCSI_TASK_PENDING)
++ goto success_cleanup;
+
+- hdr = &conn->tmhdr;
+- iscsi_prep_abort_task_pdu(task, hdr);
++ conn->tmabort_state = TMABORT_INITIAL;
+
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
+- rc = FAILED;
+- goto failed;
+- }
++ spin_unlock_bh(&session->lock);
++ rc = iscsi_exec_abort_task(sc, ctask);
++ spin_lock_bh(&session->lock);
+
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- spin_unlock_bh(&session->lock);
+- /*
+- * stop tx side incase the target had sent a abort rsp but
+- * the initiator was still writing out data.
+- */
+- iscsi_suspend_tx(conn);
+- /*
+- * we do not stop the recv side because targets have been
+- * good and have never sent us a successful tmf response
+- * then sent more data for the cmd.
+- */
+- spin_lock(&session->lock);
+- fail_command(conn, task, DID_ABORT << 16);
+- conn->tmf_state = TMF_INITIAL;
+- spin_unlock(&session->lock);
+- iscsi_start_tx(conn);
+- goto success_unlocked;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto failed_unlocked;
+- case TMF_NOT_FOUND:
+- if (!sc->SCp.ptr) {
+- conn->tmf_state = TMF_INITIAL;
+- /* task completed before tmf abort response */
++ if (rc || sc->SCp.phase != session->age ||
++ session->state != ISCSI_STATE_LOGGED_IN)
++ goto failed;
++ iscsi_ctask_mtask_cleanup(ctask);
++
++ switch (conn->tmabort_state) {
++ case TMABORT_SUCCESS:
++ goto success_cleanup;
++ case TMABORT_NOT_FOUND:
++ if (!ctask->sc) {
++ /* ctask completed before tmf abort response */
++ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+- goto success;
++ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+- conn->tmf_state = TMF_INITIAL;
++ /* timed out or failed */
++ spin_unlock_bh(&session->lock);
++ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
++ spin_lock_bh(&session->lock);
+ goto failed;
+ }
+
+-success:
++success_cleanup:
++ debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
+ spin_unlock_bh(&session->lock);
+-success_unlocked:
+- debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, task->itt);
+- mutex_unlock(&session->eh_mutex);
+- return SUCCESS;
+
+-failed:
+- spin_unlock_bh(&session->lock);
+-failed_unlocked:
+- debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
+- task ? task->itt : 0);
+- mutex_unlock(&session->eh_mutex);
+- return FAILED;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+-
+-static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
+-{
+- memset(hdr, 0, sizeof(*hdr));
+- hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
+- hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
+- hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+- int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
+- hdr->rtt = RESERVED_ITT;
+-}
+-
+-int iscsi_eh_device_reset(struct scsi_cmnd *sc)
+-{
+- struct iscsi_cls_session *cls_session;
+- struct iscsi_session *session;
+- struct iscsi_conn *conn;
+- struct iscsi_tm *hdr;
+- int rc = FAILED;
+-
+- cls_session = starget_to_session(scsi_target(sc->device));
+- session = cls_session->dd_data;
+-
+- debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+-
+- mutex_lock(&session->eh_mutex);
+- spin_lock_bh(&session->lock);
+ /*
+- * Just check if we are not logged in. We cannot check for
+- * the phase because the reset could come from a ioctl.
++ * Clean up the task if it was aborted. We hold the xmitmutex, so
++ * grab the recv lock as a writer.
+ */
+- if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+- goto unlock;
+- conn = session->leadconn;
+-
+- /* only have one tmf outstanding at a time */
+- if (conn->tmf_state != TMF_INITIAL)
+- goto unlock;
+- conn->tmf_state = TMF_QUEUED;
+-
+- hdr = &conn->tmhdr;
+- iscsi_prep_lun_reset_pdu(sc, hdr);
+-
+- if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
+- session->lu_reset_timeout)) {
+- rc = FAILED;
+- goto unlock;
+- }
+-
+- switch (conn->tmf_state) {
+- case TMF_SUCCESS:
+- break;
+- case TMF_TIMEDOUT:
+- spin_unlock_bh(&session->lock);
+- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+- goto done;
+- default:
+- conn->tmf_state = TMF_INITIAL;
+- goto unlock;
+- }
+-
+- rc = SUCCESS;
+- spin_unlock_bh(&session->lock);
+-
+- iscsi_suspend_tx(conn);
+-
++ write_lock_bh(conn->recv_lock);
+ spin_lock(&session->lock);
+- fail_all_commands(conn, sc->device->lun, DID_ERROR);
+- conn->tmf_state = TMF_INITIAL;
++ fail_command(conn, ctask, DID_ABORT << 16);
+ spin_unlock(&session->lock);
++ write_unlock_bh(conn->recv_lock);
+
+- iscsi_start_tx(conn);
+- goto done;
++success_rel_mutex:
++ mutex_unlock(&conn->xmitmutex);
++ return SUCCESS;
+
+-unlock:
++failed:
+ spin_unlock_bh(&session->lock);
+-done:
+- debug_scsi("iscsi_eh_device_reset %s\n",
+- rc == SUCCESS ? "SUCCESS" : "FAILED");
+- mutex_unlock(&session->eh_mutex);
+- return rc;
++ mutex_unlock(&conn->xmitmutex);
++
++ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ return FAILED;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
++EXPORT_SYMBOL_GPL(iscsi_eh_abort);
+
+-/*
+- * Pre-allocate a pool of @max items of @item_size. By default, the pool
+- * should be accessed via kfifo_{get,put} on q->queue.
+- * Optionally, the caller can obtain the array of object pointers
+- * by passing in a non-NULL @items pointer
+- */
+ int
+-iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
++iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
+ {
+- int i, num_arrays = 1;
++ int i;
+
+- memset(q, 0, sizeof(*q));
++ *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (*items == NULL)
++ return -ENOMEM;
+
+ q->max = max;
+-
+- /* If the user passed an items pointer, he wants a copy of
+- * the array. */
+- if (items)
+- num_arrays++;
+- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+- if (q->pool == NULL)
+- goto enomem;
++ q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
++ if (q->pool == NULL) {
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+ GFP_KERNEL, NULL);
+- if (q->queue == ERR_PTR(-ENOMEM))
+- goto enomem;
++ if (q->queue == ERR_PTR(-ENOMEM)) {
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
++ }
+
+ for (i = 0; i < max; i++) {
+- q->pool[i] = kzalloc(item_size, GFP_KERNEL);
++ q->pool[i] = kmalloc(item_size, GFP_KERNEL);
+ if (q->pool[i] == NULL) {
+- q->max = i;
+- goto enomem;
++ int j;
++
++ for (j = 0; j < i; j++)
++ kfree(q->pool[j]);
++
++ kfifo_free(q->queue);
++ kfree(q->pool);
++ kfree(*items);
++ return -ENOMEM;
+ }
++ memset(q->pool[i], 0, item_size);
++ (*items)[i] = q->pool[i];
+ __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ }
+-
+- if (items) {
+- *items = q->pool + max;
+- memcpy(*items, q->pool, max * sizeof(void *));
+- }
+-
+ return 0;
+-
+-enomem:
+- iscsi_pool_free(q);
+- return -ENOMEM;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_init);
+
+-void iscsi_pool_free(struct iscsi_pool *q)
++void iscsi_pool_free(struct iscsi_queue *q, void **items)
+ {
+ int i;
+
+ for (i = 0; i < q->max; i++)
+- kfree(q->pool[i]);
+- if (q->pool)
+- kfree(q->pool);
++ kfree(items[i]);
++ kfree(q->pool);
++ kfree(items);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
+
+-/**
+- * iscsi_host_add - add host to system
+- * @shost: scsi host
+- * @pdev: parent device
+- *
+- * This should be called by partial offload and software iscsi drivers
+- * to add a host to the system.
+- */
+-int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
+-{
+- if (!shost->can_queue)
+- shost->can_queue = ISCSI_DEF_XMIT_CMDS_MAX;
+-
+- return scsi_add_host(shost, pdev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_add);
+-
+-/**
+- * iscsi_host_alloc - allocate a host and driver data
+- * @sht: scsi host template
+- * @dd_data_size: driver host data size
+- * @qdepth: default device queue depth
+- *
+- * This should be called by partial offload and software iscsi drivers.
+- * To access the driver specific memory use the iscsi_host_priv() macro.
+- */
+-struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth)
+-{
+- struct Scsi_Host *shost;
+-
+- shost = scsi_host_alloc(sht, sizeof(struct iscsi_host) + dd_data_size);
+- if (!shost)
+- return NULL;
+- shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
+-
+- if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
+- if (qdepth != 0)
+- printk(KERN_ERR "iscsi: invalid queue depth of %d. "
+- "Queue depth must be between 1 and %d.\n",
+- qdepth, ISCSI_MAX_CMD_PER_LUN);
+- qdepth = ISCSI_DEF_CMD_PER_LUN;
+- }
+- shost->cmd_per_lun = qdepth;
+- return shost;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_alloc);
+-
+-/**
+- * iscsi_host_remove - remove host and sessions
+- * @shost: scsi host
++/*
++ * iSCSI Session's hostdata organization:
+ *
+- * This will also remove any sessions attached to the host, but if userspace
+- * is managing the session at the same time this will break. TODO: add
+- * refcounting to the netlink iscsi interface so a rmmod or host hot unplug
+- * does not remove the memory from under us.
++ * *------------------* <== hostdata_session(host->hostdata)
++ * | ptr to class sess|
++ * |------------------| <== iscsi_hostdata(host->hostdata)
++ * | iscsi_session |
++ * *------------------*
+ */
+-void iscsi_host_remove(struct Scsi_Host *shost)
+-{
+- iscsi_host_for_each_session(shost, iscsi_session_teardown);
+- scsi_remove_host(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_remove);
+
+-void iscsi_host_free(struct Scsi_Host *shost)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
++#define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
++ _sz % sizeof(unsigned long))
+
+- kfree(ihost->netdev);
+- kfree(ihost->hwaddress);
+- kfree(ihost->initiatorname);
+- scsi_host_put(shost);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_free);
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
+
+ /**
+ * iscsi_session_setup - create iscsi cls session and host and session
++ * @scsit: scsi transport template
+ * @iscsit: iscsi transport template
+- * @shost: scsi host
+- * @cmds_max: session can queue
+- * @cmd_task_size: LLD task private data size
+ * @initial_cmdsn: initial CmdSN
++ * @hostno: host no allocated
+ *
+ * This can be used by software iscsi_transports that allocate
+ * a session per scsi host.
+- *
+- * Callers should set cmds_max to the largest total numer (mgmt + scsi) of
+- * tasks they support. The iscsi layer reserves ISCSI_MGMT_CMDS_MAX tasks
+- * for nop handling and login/logout requests.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
+- uint16_t cmds_max, int cmd_task_size,
+- uint32_t initial_cmdsn, unsigned int id)
++iscsi_session_setup(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ int cmd_task_size, int mgmt_task_size,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
++ struct Scsi_Host *shost;
+ struct iscsi_session *session;
+ struct iscsi_cls_session *cls_session;
+- int cmd_i, scsi_cmds, total_cmds = cmds_max;
++ int cmd_i;
+
+- if (!total_cmds)
+- total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+- /*
+- * The iscsi layer needs some tasks for nop handling and tmfs,
+- * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+- * + 1 command for scsi IO.
+- */
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of two that is at least %d.\n",
+- total_cmds, ISCSI_TOTAL_CMDS_MIN);
++ shost = scsi_host_alloc(iscsit->host_template,
++ hostdata_privsize(sizeof(*session)));
++ if (!shost)
+ return NULL;
+- }
+-
+- if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2 less than or equal to %d.\n",
+- cmds_max, ISCSI_TOTAL_CMDS_MAX);
+- total_cmds = ISCSI_TOTAL_CMDS_MAX;
+- }
+
+- if (!is_power_of_2(total_cmds)) {
+- printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
+- "must be a power of 2.\n", total_cmds);
+- total_cmds = rounddown_pow_of_two(total_cmds);
+- if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
+- return NULL;
+- printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
+- total_cmds);
+- }
+- scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
++ shost->max_id = 1;
++ shost->max_channel = 0;
++ shost->max_lun = iscsit->max_lun;
++ shost->max_cmd_len = iscsit->max_cmd_len;
++ shost->transportt = scsit;
++ shost->transportt->create_work_queue = 1;
++ *hostno = shost->host_no;
+
+- cls_session = iscsi_alloc_session(shost, iscsit,
+- sizeof(struct iscsi_session));
+- if (!cls_session)
+- return NULL;
+- session = cls_session->dd_data;
+- session->cls_session = cls_session;
++ session = iscsi_hostdata(shost->hostdata);
++ memset(session, 0, sizeof(struct iscsi_session));
+ session->host = shost;
+ session->state = ISCSI_STATE_FREE;
+- session->fast_abort = 1;
+- session->lu_reset_timeout = 15;
+- session->abort_timeout = 10;
+- session->scsi_cmds_max = scsi_cmds;
+- session->cmds_max = total_cmds;
+- session->queued_cmdsn = session->cmdsn = initial_cmdsn;
++ session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
++ session->cmds_max = ISCSI_XMIT_CMDS_MAX;
++ session->cmdsn = initial_cmdsn;
+ session->exp_cmdsn = initial_cmdsn + 1;
+ session->max_cmdsn = initial_cmdsn + 1;
+ session->max_r2t = 1;
+ session->tt = iscsit;
+- mutex_init(&session->eh_mutex);
+- spin_lock_init(&session->lock);
+
+ /* initialize SCSI PDU commands pool */
+ if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
+ (void***)&session->cmds,
+- cmd_task_size + sizeof(struct iscsi_task)))
++ cmd_task_size + sizeof(struct iscsi_cmd_task)))
+ goto cmdpool_alloc_fail;
+
+ /* pre-format cmds pool with ITT */
+ for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
+- struct iscsi_task *task = session->cmds[cmd_i];
++ struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
+
+ if (cmd_task_size)
+- task->dd_data = &task[1];
+- task->itt = cmd_i;
+- INIT_LIST_HEAD(&task->running);
++ ctask->dd_data = &ctask[1];
++ ctask->itt = cmd_i;
++ INIT_LIST_HEAD(&ctask->running);
+ }
+
+- if (!try_module_get(iscsit->owner))
+- goto module_get_fail;
++ spin_lock_init(&session->lock);
++
++ /* initialize immediate command pool */
++ if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
++ (void***)&session->mgmt_cmds,
++ mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
++ goto mgmtpool_alloc_fail;
++
++
++ /* pre-format immediate cmds pool with ITT */
++ for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
++ struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
++
++ if (mgmt_task_size)
++ mtask->dd_data = &mtask[1];
++ mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
++ INIT_LIST_HEAD(&mtask->running);
++ }
++
++ if (scsi_add_host(shost, NULL))
++ goto add_host_fail;
+
+- if (iscsi_add_session(cls_session, id))
++ if (!try_module_get(iscsit->owner))
+ goto cls_session_fail;
++
++ cls_session = iscsi_create_session(shost, iscsit, 0);
++ if (!cls_session)
++ goto module_put;
++ *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
++
+ return cls_session;
+
+-cls_session_fail:
++module_put:
+ module_put(iscsit->owner);
+-module_get_fail:
+- iscsi_pool_free(&session->cmdpool);
++cls_session_fail:
++ scsi_remove_host(shost);
++add_host_fail:
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++mgmtpool_alloc_fail:
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ cmdpool_alloc_fail:
+- iscsi_free_session(cls_session);
++ scsi_host_put(shost);
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_setup);
+
+ /**
+ * iscsi_session_teardown - destroy session, host, and cls_session
+- * @cls_session: iscsi session
++ * shost: scsi host
+ *
+- * The driver must have called iscsi_remove_session before
+- * calling this.
+- */
++ * This can be used by software iscsi_transports that allocate
++ * a session per scsi host.
++ **/
+ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
+
+- iscsi_pool_free(&session->cmdpool);
++ scsi_remove_host(shost);
++
++ iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
++ iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+
+- kfree(session->password);
+- kfree(session->password_in);
+- kfree(session->username);
+- kfree(session->username_in);
+ kfree(session->targetname);
+- kfree(session->initiatorname);
+- kfree(session->ifacename);
+
+ iscsi_destroy_session(cls_session);
++ scsi_host_put(shost);
+ module_put(owner);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+@@ -2062,68 +1472,74 @@ EXPORT_SYMBOL_GPL(iscsi_session_teardown);
+ /**
+ * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
+ * @cls_session: iscsi_cls_session
+- * @dd_size: private driver data size
+ * @conn_idx: cid
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
+- uint32_t conn_idx)
++iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn;
+ struct iscsi_cls_conn *cls_conn;
+ char *data;
+
+- cls_conn = iscsi_create_conn(cls_session, sizeof(*conn) + dd_size,
+- conn_idx);
++ cls_conn = iscsi_create_conn(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+- memset(conn, 0, sizeof(*conn) + dd_size);
++ memset(conn, 0, sizeof(*conn));
+
+- conn->dd_data = cls_conn->dd_data + sizeof(*conn);
+ conn->session = session;
+ conn->cls_conn = cls_conn;
+ conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
+ conn->id = conn_idx;
+ conn->exp_statsn = 0;
+- conn->tmf_state = TMF_INITIAL;
+-
+- init_timer(&conn->transport_timer);
+- conn->transport_timer.data = (unsigned long)conn;
+- conn->transport_timer.function = iscsi_check_transport_timeouts;
+-
++ conn->tmabort_state = TMABORT_INITIAL;
+ INIT_LIST_HEAD(&conn->run_list);
+ INIT_LIST_HEAD(&conn->mgmt_run_list);
+- INIT_LIST_HEAD(&conn->mgmtqueue);
+ INIT_LIST_HEAD(&conn->xmitqueue);
+- INIT_LIST_HEAD(&conn->requeue);
++
++ /* initialize general immediate & non-immediate PDU commands queue */
++ conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->immqueue == ERR_PTR(-ENOMEM))
++ goto immqueue_alloc_fail;
++
++ conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
++ GFP_KERNEL, NULL);
++ if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
++ goto mgmtqueue_alloc_fail;
++
+ INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
+
+- /* allocate login_task used for the login/text sequences */
++ /* allocate login_mtask used for the login/text sequences */
+ spin_lock_bh(&session->lock);
+- if (!__kfifo_get(session->cmdpool.queue,
+- (void*)&conn->login_task,
++ if (!__kfifo_get(session->mgmtpool.queue,
++ (void*)&conn->login_mtask,
+ sizeof(void*))) {
+ spin_unlock_bh(&session->lock);
+- goto login_task_alloc_fail;
++ goto login_mtask_alloc_fail;
+ }
+ spin_unlock_bh(&session->lock);
+
+ data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
+ if (!data)
+- goto login_task_data_alloc_fail;
+- conn->login_task->data = conn->data = data;
++ goto login_mtask_data_alloc_fail;
++ conn->login_mtask->data = conn->data = data;
+
+- init_timer(&conn->tmf_timer);
++ init_timer(&conn->tmabort_timer);
++ mutex_init(&conn->xmitmutex);
+ init_waitqueue_head(&conn->ehwait);
+
+ return cls_conn;
+
+-login_task_data_alloc_fail:
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++login_mtask_data_alloc_fail:
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+-login_task_alloc_fail:
++login_mtask_alloc_fail:
++ kfifo_free(conn->mgmtqueue);
++mgmtqueue_alloc_fail:
++ kfifo_free(conn->immqueue);
++immqueue_alloc_fail:
+ iscsi_destroy_conn(cls_conn);
+ return NULL;
+ }
+@@ -2142,7 +1558,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+ unsigned long flags;
+
+- del_timer_sync(&conn->transport_timer);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
++ mutex_lock(&conn->xmitmutex);
+
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+@@ -2155,6 +1572,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
++ mutex_unlock(&conn->xmitmutex);
++
+ /*
+ * Block until all in-progress commands for this connection
+ * time out or fail.
+@@ -2167,10 +1586,9 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_irqrestore(session->host->host_lock, flags);
+ msleep_interruptible(500);
+- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+- "host_busy %d host_failed %d\n",
+- session->host->host_busy,
+- session->host->host_failed);
++ printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
++ "host_failed %d\n", session->host->host_busy,
++ session->host->host_failed);
+ /*
+ * force eh_abort() to unblock
+ */
+@@ -2178,17 +1596,23 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- iscsi_suspend_tx(conn);
++ scsi_flush_work(session->host);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
+- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
++ __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
+ sizeof(void*));
+- if (session->leadconn == conn)
++ if (session->leadconn == conn) {
+ session->leadconn = NULL;
++ /* no connections exist; reset sequencing */
++ session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
++ }
+ spin_unlock_bh(&session->lock);
+
++ kfifo_free(conn->immqueue);
++ kfifo_free(conn->mgmtqueue);
++
+ iscsi_destroy_conn(cls_conn);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
+@@ -2199,41 +1623,21 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ struct iscsi_session *session = conn->session;
+
+ if (!session) {
+- iscsi_conn_printk(KERN_ERR, conn,
+- "can't start unbound connection\n");
++ printk(KERN_ERR "iscsi: can't start unbound connection\n");
+ return -EPERM;
+ }
+
+ if ((session->imm_data_en || !session->initial_r2t_en) &&
+ session->first_burst > session->max_burst) {
+- iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+- "first_burst %d max_burst %d\n",
+- session->first_burst, session->max_burst);
++ printk("iscsi: invalid burst lengths: "
++ "first_burst %d max_burst %d\n",
++ session->first_burst, session->max_burst);
+ return -EINVAL;
+ }
+
+- if (conn->ping_timeout && !conn->recv_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+- "zero. Using 5 seconds\n.");
+- conn->recv_timeout = 5;
+- }
+-
+- if (conn->recv_timeout && !conn->ping_timeout) {
+- iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+- "zero. Using 5 seconds.\n");
+- conn->ping_timeout = 5;
+- }
+-
+ spin_lock_bh(&session->lock);
+ conn->c_stage = ISCSI_CONN_STARTED;
+ session->state = ISCSI_STATE_LOGGED_IN;
+- session->queued_cmdsn = session->cmdsn;
+-
+- conn->last_recv = jiffies;
+- conn->last_ping = jiffies;
+- if (conn->recv_timeout && conn->ping_timeout)
+- mod_timer(&conn->transport_timer,
+- jiffies + (conn->recv_timeout * HZ));
+
+ switch(conn->stop_stage) {
+ case STOP_CONN_RECOVER:
+@@ -2242,11 +1646,13 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ * commands after successful recovery
+ */
+ conn->stop_stage = 0;
+- conn->tmf_state = TMF_INITIAL;
++ conn->tmabort_state = TMABORT_INITIAL;
+ session->age++;
+- if (session->age == 16)
+- session->age = 0;
+- break;
++ spin_unlock_bh(&session->lock);
++
++ iscsi_unblock_session(session_to_cls(session));
++ wake_up(&conn->ehwait);
++ return 0;
+ case STOP_CONN_TERM:
+ conn->stop_stage = 0;
+ break;
+@@ -2255,8 +1661,6 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
+ }
+ spin_unlock_bh(&session->lock);
+
+- iscsi_unblock_session(session->cls_session);
+- wake_up(&conn->ehwait);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+@@ -2264,23 +1668,52 @@ EXPORT_SYMBOL_GPL(iscsi_conn_start);
+ static void
+ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+ {
+- struct iscsi_task *task, *tmp;
++ struct iscsi_mgmt_task *mtask, *tmp;
+
+ /* handle pending */
+- list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
+- debug_scsi("flushing pending mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) ||
++ __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
++ if (mtask == conn->login_mtask)
++ continue;
++ debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ }
+
+ /* handle running */
+- list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
+- debug_scsi("flushing running mgmt task itt 0x%x\n", task->itt);
+- /* release ref from prep task */
+- __iscsi_put_task(task);
++ list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
++ debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
++ list_del(&mtask->running);
++
++ if (mtask == conn->login_mtask)
++ continue;
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
++ }
++
++ conn->mtask = NULL;
++}
++
++/* Fail commands. Mutex and session lock held and recv side suspended */
++static void fail_all_commands(struct iscsi_conn *conn)
++{
++ struct iscsi_cmd_task *ctask, *tmp;
++
++ /* flush pending */
++ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
++ debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
++ ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
++ }
++
++ /* fail all other running */
++ list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
++ debug_scsi("failing in progress sc %p itt 0x%x\n",
++ ctask->sc, ctask->itt);
++ fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ }
+
+- conn->task = NULL;
++ conn->ctask = NULL;
+ }
+
+ static void iscsi_start_session_recovery(struct iscsi_session *session,
+@@ -2288,13 +1721,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ {
+ int old_stop_stage;
+
+- del_timer_sync(&conn->transport_timer);
+-
+- mutex_lock(&session->eh_mutex);
+ spin_lock_bh(&session->lock);
+ if (conn->stop_stage == STOP_CONN_TERM) {
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
+ return;
+ }
+
+@@ -2311,9 +1740,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ old_stop_stage = conn->stop_stage;
+ conn->stop_stage = flag;
+ conn->c_stage = ISCSI_CONN_STOPPED;
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ spin_unlock_bh(&session->lock);
+
+- iscsi_suspend_tx(conn);
++ write_lock_bh(conn->recv_lock);
++ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
++ write_unlock_bh(conn->recv_lock);
++
++ mutex_lock(&conn->xmitmutex);
+ /*
+ * for connection level recovery we should not calculate
+ * header digest. conn->hdr_size used for optimization
+@@ -2326,7 +1760,7 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ if (session->state == ISCSI_STATE_IN_RECOVERY &&
+ old_stop_stage != STOP_CONN_RECOVER) {
+ debug_scsi("blocking session\n");
+- iscsi_block_session(session->cls_session);
++ iscsi_block_session(session_to_cls(session));
+ }
+ }
+
+@@ -2334,11 +1768,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
+ * flush queues.
+ */
+ spin_lock_bh(&session->lock);
+- fail_all_commands(conn, -1,
+- STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
++ fail_all_commands(conn);
+ flush_control_queues(session, conn);
+ spin_unlock_bh(&session->lock);
+- mutex_unlock(&session->eh_mutex);
++
++ mutex_unlock(&conn->xmitmutex);
+ }
+
+ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+@@ -2352,8 +1786,7 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+ iscsi_start_session_recovery(session, conn, flag);
+ break;
+ default:
+- iscsi_conn_printk(KERN_ERR, conn,
+- "invalid stop flag %d\n", flag);
++ printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ }
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+@@ -2361,7 +1794,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_stop);
+ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn, int is_leading)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct iscsi_session *session = class_to_transport_session(cls_session);
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ spin_lock_bh(&session->lock);
+@@ -2387,21 +1820,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ uint32_t value;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- sscanf(buf, "%d", &session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- sscanf(buf, "%d", &session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- sscanf(buf, "%d", &session->lu_reset_timeout);
+- break;
+- case ISCSI_PARAM_PING_TMO:
+- sscanf(buf, "%d", &conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- sscanf(buf, "%d", &conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ sscanf(buf, "%d", &conn->max_recv_dlength);
+ break;
+@@ -2449,30 +1867,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ case ISCSI_PARAM_EXP_STATSN:
+ sscanf(buf, "%u", &conn->exp_statsn);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- kfree(session->username);
+- session->username = kstrdup(buf, GFP_KERNEL);
+- if (!session->username)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- kfree(session->username_in);
+- session->username_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->username_in)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- kfree(session->password);
+- session->password = kstrdup(buf, GFP_KERNEL);
+- if (!session->password)
+- return -ENOMEM;
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- kfree(session->password_in);
+- session->password_in = kstrdup(buf, GFP_KERNEL);
+- if (!session->password_in)
+- return -ENOMEM;
+- break;
+ case ISCSI_PARAM_TARGET_NAME:
+ /* this should not change between logins */
+ if (session->targetname)
+@@ -2500,14 +1894,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ if (!conn->persistent_address)
+ return -ENOMEM;
+ break;
+- case ISCSI_PARAM_IFACE_NAME:
+- if (!session->ifacename)
+- session->ifacename = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- session->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2519,19 +1905,11 @@ EXPORT_SYMBOL_GPL(iscsi_set_param);
+ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf)
+ {
+- struct iscsi_session *session = cls_session->dd_data;
++ struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
++ struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_FAST_ABORT:
+- len = sprintf(buf, "%d\n", session->fast_abort);
+- break;
+- case ISCSI_PARAM_ABORT_TMO:
+- len = sprintf(buf, "%d\n", session->abort_timeout);
+- break;
+- case ISCSI_PARAM_LU_RESET_TMO:
+- len = sprintf(buf, "%d\n", session->lu_reset_timeout);
+- break;
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ len = sprintf(buf, "%d\n", session->initial_r2t_en);
+ break;
+@@ -2562,27 +1940,6 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ case ISCSI_PARAM_TPGT:
+ len = sprintf(buf, "%d\n", session->tpgt);
+ break;
+- case ISCSI_PARAM_USERNAME:
+- len = sprintf(buf, "%s\n", session->username);
+- break;
+- case ISCSI_PARAM_USERNAME_IN:
+- len = sprintf(buf, "%s\n", session->username_in);
+- break;
+- case ISCSI_PARAM_PASSWORD:
+- len = sprintf(buf, "%s\n", session->password);
+- break;
+- case ISCSI_PARAM_PASSWORD_IN:
+- len = sprintf(buf, "%s\n", session->password_in);
+- break;
+- case ISCSI_PARAM_IFACE_NAME:
+- len = sprintf(buf, "%s\n", session->ifacename);
+- break;
+- case ISCSI_PARAM_INITIATOR_NAME:
+- if (!session->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", session->initiatorname);
+- break;
+ default:
+ return -ENOSYS;
+ }
+@@ -2598,12 +1955,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ int len;
+
+ switch(param) {
+- case ISCSI_PARAM_PING_TMO:
+- len = sprintf(buf, "%u\n", conn->ping_timeout);
+- break;
+- case ISCSI_PARAM_RECV_TMO:
+- len = sprintf(buf, "%u\n", conn->recv_timeout);
+- break;
+ case ISCSI_PARAM_MAX_RECV_DLENGTH:
+ len = sprintf(buf, "%u\n", conn->max_recv_dlength);
+ break;
+@@ -2639,72 +1990,6 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
+
+-int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+- int len;
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->netdev);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- len = sprintf(buf, "%s\n", "default");
+- else
+- len = sprintf(buf, "%s\n", ihost->hwaddress);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n", ihost->initiatorname);
+- break;
+- case ISCSI_HOST_PARAM_IPADDRESS:
+- if (!strlen(ihost->local_address))
+- len = sprintf(buf, "%s\n", "unknown");
+- else
+- len = sprintf(buf, "%s\n",
+- ihost->local_address);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return len;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_get_param);
+-
+-int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
+- char *buf, int buflen)
+-{
+- struct iscsi_host *ihost = shost_priv(shost);
+-
+- switch (param) {
+- case ISCSI_HOST_PARAM_NETDEV_NAME:
+- if (!ihost->netdev)
+- ihost->netdev = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_HWADDRESS:
+- if (!ihost->hwaddress)
+- ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
+- break;
+- case ISCSI_HOST_PARAM_INITIATOR_NAME:
+- if (!ihost->initiatorname)
+- ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
+- break;
+- default:
+- return -ENOSYS;
+- }
+-
+- return 0;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_set_param);
+-
+ MODULE_AUTHOR("Mike Christie");
+ MODULE_DESCRIPTION("iSCSI library functions");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 043c392..caf1836 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -30,28 +30,26 @@
+ #include <scsi/scsi_transport_iscsi.h>
+ #include <scsi/iscsi_if.h>
+
+-#define ISCSI_SESSION_ATTRS 21
+-#define ISCSI_CONN_ATTRS 13
+-#define ISCSI_HOST_ATTRS 4
+-
+-#define ISCSI_TRANSPORT_VERSION "2.0-870"
++#define ISCSI_SESSION_ATTRS 11
++#define ISCSI_CONN_ATTRS 11
++#define ISCSI_HOST_ATTRS 0
++#define ISCSI_TRANSPORT_VERSION "2.0-724"
+
+ struct iscsi_internal {
+ int daemon_pid;
+ struct scsi_transport_template t;
+ struct iscsi_transport *iscsi_transport;
+ struct list_head list;
+- struct device dev;
++ struct class_device cdev;
+
+- struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
++ struct class_device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
+ struct transport_container conn_cont;
+- struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
++ struct class_device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
+ struct transport_container session_cont;
+- struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
++ struct class_device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
+ };
+
+ static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
+-static struct workqueue_struct *iscsi_eh_timer_workq;
+
+ /*
+ * list of registered transports and lock that must
+@@ -64,12 +62,12 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define to_iscsi_internal(tmpl) \
+ container_of(tmpl, struct iscsi_internal, t)
+
+-#define dev_to_iscsi_internal(_dev) \
+- container_of(_dev, struct iscsi_internal, dev)
++#define cdev_to_iscsi_internal(_cdev) \
++ container_of(_cdev, struct iscsi_internal, cdev)
+
+-static void iscsi_transport_release(struct device *dev)
++static void iscsi_transport_release(struct class_device *cdev)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ kfree(priv);
+ }
+
+@@ -79,33 +77,37 @@ static void iscsi_transport_release(struct device *dev)
+ */
+ static struct class iscsi_transport_class = {
+ .name = "iscsi_transport",
+- .dev_release = iscsi_transport_release,
++ .release = iscsi_transport_release,
+ };
+
+ static ssize_t
+-show_transport_handle(struct device *dev, struct device_attribute *attr,
+- char *buf)
++show_transport_handle(struct class_device *cdev, char *buf)
+ {
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev);
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+ return sprintf(buf, "%llu\n", (unsigned long long)iscsi_handle(priv->iscsi_transport));
+ }
+-static DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
++static CLASS_DEVICE_ATTR(handle, S_IRUGO, show_transport_handle, NULL);
+
+ #define show_transport_attr(name, format) \
+ static ssize_t \
+-show_transport_##name(struct device *dev, \
+- struct device_attribute *attr,char *buf) \
++show_transport_##name(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_internal *priv = dev_to_iscsi_internal(dev); \
++ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev); \
+ return sprintf(buf, format"\n", priv->iscsi_transport->name); \
+ } \
+-static DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
++static CLASS_DEVICE_ATTR(name, S_IRUGO, show_transport_##name, NULL);
+
+ show_transport_attr(caps, "0x%x");
++show_transport_attr(max_lun, "%d");
++show_transport_attr(max_conn, "%d");
++show_transport_attr(max_cmd_len, "%d");
+
+ static struct attribute *iscsi_transport_attrs[] = {
+- &dev_attr_handle.attr,
+- &dev_attr_caps.attr,
++ &class_device_attr_handle.attr,
++ &class_device_attr_caps.attr,
++ &class_device_attr_max_lun.attr,
++ &class_device_attr_max_conn.attr,
++ &class_device_attr_max_cmd_len.attr,
+ NULL,
+ };
+
+@@ -113,163 +115,22 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-/*
+- * iSCSI endpoint attrs
+- */
+-#define iscsi_dev_to_endpoint(_dev) \
+- container_of(_dev, struct iscsi_endpoint, dev)
+-
+-#define ISCSI_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
+- __ATTR(_name,_mode,_show,_store)
+-
+-static void iscsi_endpoint_release(struct device *dev)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- kfree(ep);
+-}
+-
+-static struct class iscsi_endpoint_class = {
+- .name = "iscsi_endpoint",
+- .dev_release = iscsi_endpoint_release,
+-};
+-
+-static ssize_t
+-show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- return sprintf(buf, "%u\n", ep->id);
+-}
+-static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
+-
+-static struct attribute *iscsi_endpoint_attrs[] = {
+- &dev_attr_ep_handle.attr,
+- NULL,
+-};
+-
+-static struct attribute_group iscsi_endpoint_group = {
+- .attrs = iscsi_endpoint_attrs,
+-};
+-
+-#define ISCSI_MAX_EPID -1
+-
+-static int iscsi_match_epid(struct device *dev, void *data)
+-{
+- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+- unsigned int *epid = (unsigned int *) data;
+-
+- return *epid == ep->id;
+-}
+-
+-struct iscsi_endpoint *
+-iscsi_create_endpoint(int dd_size)
+-{
+- struct device *dev;
+- struct iscsi_endpoint *ep;
+- unsigned int id;
+- int err;
+-
+- for (id = 1; id < ISCSI_MAX_EPID; id++) {
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
+- iscsi_match_epid);
+- if (!dev)
+- break;
+- }
+- if (id == ISCSI_MAX_EPID) {
+- printk(KERN_ERR "Too many connections. Max supported %u\n",
+- ISCSI_MAX_EPID - 1);
+- return NULL;
+- }
+-
+- ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
+- if (!ep)
+- return NULL;
+-
+- ep->id = id;
+- ep->dev.class = &iscsi_endpoint_class;
+- snprintf(ep->dev.bus_id, BUS_ID_SIZE, "ep-%u", id);
+- err = device_register(&ep->dev);
+- if (err)
+- goto free_ep;
+-
+- err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- if (err)
+- goto unregister_dev;
+-
+- if (dd_size)
+- ep->dd_data = &ep[1];
+- return ep;
+-
+-unregister_dev:
+- device_unregister(&ep->dev);
+- return NULL;
+-
+-free_ep:
+- kfree(ep);
+- return NULL;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_create_endpoint);
+-
+-void iscsi_destroy_endpoint(struct iscsi_endpoint *ep)
+-{
+- sysfs_remove_group(&ep->dev.kobj, &iscsi_endpoint_group);
+- device_unregister(&ep->dev);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint);
+-
+-struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
+-{
+- struct iscsi_endpoint *ep;
+- struct device *dev;
+-
+- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
+- iscsi_match_epid);
+- if (!dev)
+- return NULL;
+-
+- ep = iscsi_dev_to_endpoint(dev);
+- /*
+- * we can drop this now because the interface will prevent
+- * removals and lookups from racing.
+- */
+- put_device(dev);
+- return ep;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
+-
+ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
++ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+- atomic_set(&ihost->nr_scans, 0);
++ INIT_LIST_HEAD(&ihost->sessions);
+ mutex_init(&ihost->mutex);
+-
+- snprintf(ihost->scan_workq_name, sizeof(ihost->scan_workq_name),
+- "iscsi_scan_%d", shost->host_no);
+- ihost->scan_workq = create_singlethread_workqueue(
+- ihost->scan_workq_name);
+- if (!ihost->scan_workq)
+- return -ENOMEM;
+- return 0;
+-}
+-
+-static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
+- struct device *cdev)
+-{
+- struct Scsi_Host *shost = dev_to_shost(dev);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- destroy_workqueue(ihost->scan_workq);
+ return 0;
+ }
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+ "iscsi_host",
+ iscsi_setup_host,
+- iscsi_remove_host,
++ NULL,
+ NULL);
+
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+@@ -340,54 +201,6 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
+ * The following functions can be used by LLDs that allocate
+ * their own scsi_hosts or by software iscsi LLDs
+ */
+-static struct {
+- int value;
+- char *name;
+-} iscsi_session_state_names[] = {
+- { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+- { ISCSI_SESSION_FAILED, "FAILED" },
+- { ISCSI_SESSION_FREE, "FREE" },
+-};
+-
+-static const char *iscsi_session_state_name(int state)
+-{
+- int i;
+- char *name = NULL;
+-
+- for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+- if (iscsi_session_state_names[i].value == state) {
+- name = iscsi_session_state_names[i].name;
+- break;
+- }
+- }
+- return name;
+-}
+-
+-int iscsi_session_chkready(struct iscsi_cls_session *session)
+-{
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_LOGGED_IN:
+- err = 0;
+- break;
+- case ISCSI_SESSION_FAILED:
+- err = DID_IMM_RETRY << 16;
+- break;
+- case ISCSI_SESSION_FREE:
+- err = DID_NO_CONNECT << 16;
+- break;
+- default:
+- err = DID_NO_CONNECT << 16;
+- break;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+-
+ static void iscsi_session_release(struct device *dev)
+ {
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
+@@ -403,114 +216,22 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_iter_session_fn(struct device *dev, void *data)
+-{
+- void (* fn) (struct iscsi_cls_session *) = data;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+- fn(iscsi_dev_to_session(dev));
+- return 0;
+-}
+-
+-void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *))
+-{
+- device_for_each_child(&shost->shost_gendev, fn,
+- iscsi_iter_session_fn);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_host_for_each_session);
+-
+-/**
+- * iscsi_scan_finished - helper to report when running scans are done
+- * @shost: scsi host
+- * @time: scan run time
+- *
+- * This function can be used by drives like qla4xxx to report to the scsi
+- * layer when the scans it kicked off at module load time are done.
+- */
+-int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+-{
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- /*
+- * qla4xxx will have kicked off some session unblocks before calling
+- * scsi_scan_host, so just wait for them to complete.
+- */
+- return !atomic_read(&ihost->nr_scans);
+-}
+-EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+-
+-struct iscsi_scan_data {
+- unsigned int channel;
+- unsigned int id;
+- unsigned int lun;
+-};
+-
+-static int iscsi_user_scan_session(struct device *dev, void *data)
++static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
++ uint id, uint lun)
+ {
+- struct iscsi_scan_data *scan_data = data;
++ struct iscsi_host *ihost = shost->shost_data;
+ struct iscsi_cls_session *session;
+- struct Scsi_Host *shost;
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- shost = iscsi_session_to_shost(session);
+- ihost = shost->shost_data;
+
+ mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->state != ISCSI_SESSION_LOGGED_IN) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return 0;
+- }
+- id = session->target_id;
+- spin_unlock_irqrestore(&session->lock, flags);
+-
+- if (id != ISCSI_MAX_TARGET) {
+- if ((scan_data->channel == SCAN_WILD_CARD ||
+- scan_data->channel == 0) &&
+- (scan_data->id == SCAN_WILD_CARD ||
+- scan_data->id == id))
+- scsi_scan_target(&session->dev, 0, id,
+- scan_data->lun, 1);
++ list_for_each_entry(session, &ihost->sessions, host_list) {
++ if ((channel == SCAN_WILD_CARD || channel == 0) &&
++ (id == SCAN_WILD_CARD || id == session->target_id))
++ scsi_scan_target(&session->dev, 0,
++ session->target_id, lun, 1);
+ }
+ mutex_unlock(&ihost->mutex);
+- return 0;
+-}
+-
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_scan_data scan_data;
+
+- scan_data.channel = channel;
+- scan_data.id = id;
+- scan_data.lun = lun;
+-
+- return device_for_each_child(&shost->shost_gendev, &scan_data,
+- iscsi_user_scan_session);
+-}
+-
+-static void iscsi_scan_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session, scan_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- struct iscsi_scan_data scan_data;
+-
+- scan_data.channel = 0;
+- scan_data.id = SCAN_WILD_CARD;
+- scan_data.lun = SCAN_WILD_CARD;
+-
+- iscsi_user_scan_session(&session->dev, &scan_data);
+- atomic_dec(&ihost->nr_scans);
++ return 0;
+ }
+
+ static void session_recovery_timedout(struct work_struct *work)
+@@ -518,24 +239,9 @@ static void session_recovery_timedout(struct work_struct *work)
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session,
+ recovery_work.work);
+- unsigned long flags;
+-
+- iscsi_cls_session_printk(KERN_INFO, session,
+- "session recovery timed out after %d secs\n",
+- session->recovery_tmo);
+
+- spin_lock_irqsave(&session->lock, flags);
+- switch (session->state) {
+- case ISCSI_SESSION_FAILED:
+- session->state = ISCSI_SESSION_FREE;
+- break;
+- case ISCSI_SESSION_LOGGED_IN:
+- case ISCSI_SESSION_FREE:
+- /* we raced with the unblock's flush */
+- spin_unlock_irqrestore(&session->lock, flags);
+- return;
+- }
+- spin_unlock_irqrestore(&session->lock, flags);
++ dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
++ "out after %d secs\n", session->recovery_tmo);
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
+@@ -543,201 +249,73 @@ static void session_recovery_timedout(struct work_struct *work)
+ scsi_target_unblock(&session->dev);
+ }
+
+-static void __iscsi_unblock_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unblock_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /*
+- * The recovery and unblock work get run from the same workqueue,
+- * so try to cancel it if it was going to run after this unblock.
+- */
+- cancel_delayed_work(&session->recovery_work);
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_LOGGED_IN;
+- spin_unlock_irqrestore(&session->lock, flags);
+- /* start IO */
+- scsi_target_unblock(&session->dev);
+- /*
+- * Only do kernel scanning if the driver is properly hooked into
+- * the async scanning code (drivers like iscsi_tcp do login and
+- * scanning from userspace).
+- */
+- if (shost->hostt->scan_finished) {
+- if (queue_work(ihost->scan_workq, &session->scan_work))
+- atomic_inc(&ihost->nr_scans);
+- }
+-}
+-
+-/**
+- * iscsi_unblock_session - set a session as logged in and start IO.
+- * @session: iscsi session
+- *
+- * Mark a session as ready to accept IO.
+- */
+ void iscsi_unblock_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->unblock_work);
+- /*
+- * make sure all the events have completed before tell the driver
+- * it is safe
+- */
+- flush_workqueue(iscsi_eh_timer_workq);
++ if (!cancel_delayed_work(&session->recovery_work))
++ flush_scheduled_work();
++ scsi_target_unblock(&session->dev);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_unblock_session);
+
+-static void __iscsi_block_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- block_work);
+- unsigned long flags;
+-
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FAILED;
+- spin_unlock_irqrestore(&session->lock, flags);
+- scsi_target_block(&session->dev);
+- queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
+- session->recovery_tmo * HZ);
+-}
+-
+ void iscsi_block_session(struct iscsi_cls_session *session)
+ {
+- queue_work(iscsi_eh_timer_workq, &session->block_work);
++ scsi_target_block(&session->dev);
++ schedule_delayed_work(&session->recovery_work,
++ session->recovery_tmo * HZ);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_block_session);
+
+-static void __iscsi_unbind_session(struct work_struct *work)
+-{
+- struct iscsi_cls_session *session =
+- container_of(work, struct iscsi_cls_session,
+- unbind_work);
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+-
+- /* Prevent new scans and make sure scanning is not in progress */
+- mutex_lock(&ihost->mutex);
+- spin_lock_irqsave(&session->lock, flags);
+- if (session->target_id == ISCSI_MAX_TARGET) {
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+- return;
+- }
+- session->target_id = ISCSI_MAX_TARGET;
+- spin_unlock_irqrestore(&session->lock, flags);
+- mutex_unlock(&ihost->mutex);
+-
+- scsi_remove_target(&session->dev);
+- iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+-}
+-
+-static int iscsi_unbind_session(struct iscsi_cls_session *session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+-
+- return queue_work(ihost->scan_workq, &session->unbind_work);
+-}
+-
+ struct iscsi_cls_session *
+-iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size)
++iscsi_alloc_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport)
+ {
+ struct iscsi_cls_session *session;
+
+- session = kzalloc(sizeof(*session) + dd_size,
++ session = kzalloc(sizeof(*session) + transport->sessiondata_size,
+ GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ session->transport = transport;
+ session->recovery_tmo = 120;
+- session->state = ISCSI_SESSION_FREE;
+ INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
++ INIT_LIST_HEAD(&session->host_list);
+ INIT_LIST_HEAD(&session->sess_list);
+- INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
+- INIT_WORK(&session->block_work, __iscsi_block_session);
+- INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+- INIT_WORK(&session->scan_work, iscsi_scan_session);
+- spin_lock_init(&session->lock);
+
+ /* this is released in the dev's release function */
+ scsi_host_get(shost);
+ session->dev.parent = &shost->shost_gendev;
+ session->dev.release = iscsi_session_release;
+ device_initialize(&session->dev);
+- if (dd_size)
++ if (transport->sessiondata_size)
+ session->dd_data = &session[1];
+ return session;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
+
+-static int iscsi_get_next_target_id(struct device *dev, void *data)
+-{
+- struct iscsi_cls_session *session;
+- unsigned long flags;
+- int err = 0;
+-
+- if (!iscsi_is_session_dev(dev))
+- return 0;
+-
+- session = iscsi_dev_to_session(dev);
+- spin_lock_irqsave(&session->lock, flags);
+- if (*((unsigned int *) data) == session->target_id)
+- err = -EEXIST;
+- spin_unlock_irqrestore(&session->lock, flags);
+- return err;
+-}
+-
+ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost;
+- unsigned long flags;
+- unsigned int id = target_id;
++ struct iscsi_host *ihost;
+ int err;
+
+ ihost = shost->shost_data;
+ session->sid = atomic_add_return(1, &iscsi_session_nr);
+-
+- if (id == ISCSI_MAX_TARGET) {
+- for (id = 0; id < ISCSI_MAX_TARGET; id++) {
+- err = device_for_each_child(&shost->shost_gendev, &id,
+- iscsi_get_next_target_id);
+- if (!err)
+- break;
+- }
+-
+- if (id == ISCSI_MAX_TARGET) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Too many iscsi targets. Max "
+- "number of targets is %d.\n",
+- ISCSI_MAX_TARGET - 1);
+- goto release_host;
+- }
+- }
+- session->target_id = id;
++ session->target_id = target_id;
+
+ snprintf(session->dev.bus_id, BUS_ID_SIZE, "session%u",
+ session->sid);
+ err = device_add(&session->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "could not register session's dev\n");
++ dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
++ "register session's dev\n");
+ goto release_host;
+ }
+ transport_register_device(&session->dev);
+
+- spin_lock_irqsave(&sesslock, flags);
+- list_add(&session->sess_list, &sesslist);
+- spin_unlock_irqrestore(&sesslock, flags);
+-
+- iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
++ mutex_lock(&ihost->mutex);
++ list_add(&session->host_list, &ihost->sessions);
++ mutex_unlock(&ihost->mutex);
+ return 0;
+
+ release_host:
+@@ -750,18 +328,17 @@ EXPORT_SYMBOL_GPL(iscsi_add_session);
+ * iscsi_create_session - create iscsi class session
+ * @shost: scsi host
+ * @transport: iscsi transport
+- * @dd_size: private driver data size
+- * @target_id: which target
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ struct iscsi_cls_session *
+-iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+- int dd_size, unsigned int target_id)
++iscsi_create_session(struct Scsi_Host *shost,
++ struct iscsi_transport *transport,
++ unsigned int target_id)
+ {
+ struct iscsi_cls_session *session;
+
+- session = iscsi_alloc_session(shost, transport, dd_size);
++ session = iscsi_alloc_session(shost, transport);
+ if (!session)
+ return NULL;
+
+@@ -773,65 +350,19 @@ iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
+ }
+ EXPORT_SYMBOL_GPL(iscsi_create_session);
+
+-static void iscsi_conn_release(struct device *dev)
+-{
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
+- struct device *parent = conn->dev.parent;
+-
+- kfree(conn);
+- put_device(parent);
+-}
+-
+-static int iscsi_is_conn_dev(const struct device *dev)
+-{
+- return dev->release == iscsi_conn_release;
+-}
+-
+-static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data)
+-{
+- if (!iscsi_is_conn_dev(dev))
+- return 0;
+- return iscsi_destroy_conn(iscsi_dev_to_conn(dev));
+-}
+-
+ void iscsi_remove_session(struct iscsi_cls_session *session)
+ {
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+- struct iscsi_cls_host *ihost = shost->shost_data;
+- unsigned long flags;
+- int err;
+-
+- spin_lock_irqsave(&sesslock, flags);
+- list_del(&session->sess_list);
+- spin_unlock_irqrestore(&sesslock, flags);
++ struct iscsi_host *ihost = shost->shost_data;
+
+- /* make sure there are no blocks/unblocks queued */
+- flush_workqueue(iscsi_eh_timer_workq);
+- /* make sure the timedout callout is not running */
+ if (!cancel_delayed_work(&session->recovery_work))
+- flush_workqueue(iscsi_eh_timer_workq);
+- /*
+- * If we are blocked let commands flow again. The lld or iscsi
+- * layer should set up the queuecommand to fail commands.
+- * We assume that LLD will not be calling block/unblock while
+- * removing the session.
+- */
+- spin_lock_irqsave(&session->lock, flags);
+- session->state = ISCSI_SESSION_FREE;
+- spin_unlock_irqrestore(&session->lock, flags);
++ flush_scheduled_work();
+
+- scsi_target_unblock(&session->dev);
+- /* flush running scans then delete devices */
+- flush_workqueue(ihost->scan_workq);
+- __iscsi_unbind_session(&session->unbind_work);
++ mutex_lock(&ihost->mutex);
++ list_del(&session->host_list);
++ mutex_unlock(&ihost->mutex);
+
+- /* hw iscsi may not have removed all connections from session */
+- err = device_for_each_child(&session->dev, NULL,
+- iscsi_iter_destroy_conn_fn);
+- if (err)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Could not delete all connections "
+- "for session. Error %d.\n", err);
++ scsi_remove_target(&session->dev);
+
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+@@ -840,9 +371,9 @@ EXPORT_SYMBOL_GPL(iscsi_remove_session);
+
+ void iscsi_free_session(struct iscsi_cls_session *session)
+ {
+- iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
+ put_device(&session->dev);
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_free_session);
+
+ /**
+@@ -851,7 +382,7 @@ EXPORT_SYMBOL_GPL(iscsi_free_session);
+ *
+ * Can be called by a LLD or iscsi_transport. There must not be
+ * any running connections.
+- */
++ **/
+ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ {
+ iscsi_remove_session(session);
+@@ -860,10 +391,23 @@ int iscsi_destroy_session(struct iscsi_cls_session *session)
+ }
+ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+
++static void iscsi_conn_release(struct device *dev)
++{
++ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
++ struct device *parent = conn->dev.parent;
++
++ kfree(conn);
++ put_device(parent);
++}
++
++static int iscsi_is_conn_dev(const struct device *dev)
++{
++ return dev->release == iscsi_conn_release;
++}
++
+ /**
+ * iscsi_create_conn - create iscsi class connection
+ * @session: iscsi cls session
+- * @dd_size: private driver data size
+ * @cid: connection id
+ *
+ * This can be called from a LLD or iscsi_transport. The connection
+@@ -874,19 +418,19 @@ EXPORT_SYMBOL_GPL(iscsi_destroy_session);
+ * for software iscsi we could be trying to preallocate a connection struct
+ * in which case there could be two connection structs and cid would be
+ * non-zero.
+- */
++ **/
+ struct iscsi_cls_conn *
+-iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
++iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
+ {
+ struct iscsi_transport *transport = session->transport;
+ struct iscsi_cls_conn *conn;
+- unsigned long flags;
+ int err;
+
+- conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL);
++ conn = kzalloc(sizeof(*conn) + transport->conndata_size, GFP_KERNEL);
+ if (!conn)
+ return NULL;
+- if (dd_size)
++
++ if (transport->conndata_size)
+ conn->dd_data = &conn[1];
+
+ INIT_LIST_HEAD(&conn->conn_list);
+@@ -903,16 +447,11 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
+ conn->dev.release = iscsi_conn_release;
+ err = device_register(&conn->dev);
+ if (err) {
+- iscsi_cls_session_printk(KERN_ERR, session, "could not "
+- "register connection's dev\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
++ "connection's dev\n");
+ goto release_parent_ref;
+ }
+ transport_register_device(&conn->dev);
+-
+- spin_lock_irqsave(&connlock, flags);
+- list_add(&conn->conn_list, &connlist);
+- conn->active = 1;
+- spin_unlock_irqrestore(&connlock, flags);
+ return conn;
+
+ release_parent_ref:
+@@ -926,23 +465,17 @@ EXPORT_SYMBOL_GPL(iscsi_create_conn);
+
+ /**
+ * iscsi_destroy_conn - destroy iscsi class connection
+- * @conn: iscsi cls session
++ * @conn: iscsi cls connection
+ *
+ * This can be called from a LLD or iscsi_transport.
+- */
++ **/
+ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
+ {
+- unsigned long flags;
+-
+- spin_lock_irqsave(&connlock, flags);
+- conn->active = 0;
+- list_del(&conn->conn_list);
+- spin_unlock_irqrestore(&connlock, flags);
+-
+ transport_unregister_device(&conn->dev);
+ device_unregister(&conn->dev);
+ return 0;
+ }
++
+ EXPORT_SYMBOL_GPL(iscsi_destroy_conn);
+
+ /*
+@@ -1011,8 +544,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+- "control PDU: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
++ "control PDU: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1045,8 +578,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+- "conn error (%d)\n", error);
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
++ "conn error (%d)\n", error);
+ return;
+ }
+
+@@ -1060,8 +593,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
+
+ iscsi_broadcast_skb(skb, GFP_ATOMIC);
+
+- iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+- error);
++ dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
++ error);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_conn_error);
+
+@@ -1076,10 +609,12 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
+ int t = done ? NLMSG_DONE : type;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+- if (!skb) {
+- printk(KERN_ERR "Could not allocate skb to send reply.\n");
+- return -ENOMEM;
+- }
++ /*
++ * FIXME:
++ * user is supposed to react to iferror == -ENOMEM;
++ * see iscsi_if_rx().
++ */
++ BUG_ON(!skb);
+
+ nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+ nlh->nlmsg_flags = flags;
+@@ -1116,8 +651,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+
+ skbstat = alloc_skb(len, GFP_ATOMIC);
+ if (!skbstat) {
+- iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+- "deliver stats: OOM\n");
++ dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
++ "deliver stats: OOM\n");
+ return -ENOMEM;
+ }
+
+@@ -1152,87 +687,145 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+ }
+
+ /**
+- * iscsi_session_event - send session destr. completion event
+- * @session: iscsi class session
+- * @event: type of event
+- */
+-int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event)
++ * iscsi_if_destroy_session_done - send session destr. completion event
++ * @conn: last connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that their HW has
++ * removed a session.
++ **/
++int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
+ {
+ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
+ struct Scsi_Host *shost;
+ struct iscsi_uevent *ev;
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
++ unsigned long flags;
+ int rc, len = NLMSG_SPACE(sizeof(*ev));
+
+- priv = iscsi_if_transport_lookup(session->transport);
++ priv = iscsi_if_transport_lookup(conn->transport);
+ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
+ shost = iscsi_session_to_shost(session);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u\n", event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
+ return -ENOMEM;
+ }
+
+ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+ ev = NLMSG_DATA(nlh);
+- ev->transport_handle = iscsi_handle(session->transport);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_KEVENT_DESTROY_SESSION;
++ ev->r.d_session.host_no = shost->host_no;
++ ev->r.d_session.sid = session->sid;
+
+- ev->type = event;
+- switch (event) {
+- case ISCSI_KEVENT_DESTROY_SESSION:
+- ev->r.d_session.host_no = shost->host_no;
+- ev->r.d_session.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_CREATE_SESSION:
+- ev->r.c_session_ret.host_no = shost->host_no;
+- ev->r.c_session_ret.sid = session->sid;
+- break;
+- case ISCSI_KEVENT_UNBIND_SESSION:
+- ev->r.unbind_session.host_no = shost->host_no;
+- ev->r.unbind_session.sid = session->sid;
+- break;
+- default:
+- iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+- "%u.\n", event);
+- kfree_skb(skb);
++ /*
++ * this will occur if the daemon is not up, so we just warn
++ * the user and when the daemon is restarted it will handle it
++ */
++ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
++ if (rc < 0)
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session destruction event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(iscsi_if_destroy_session_done);
++
++/**
++ * iscsi_if_create_session_done - send session creation completion event
++ * @conn: leading connection for session
++ *
++ * This is called by HW iscsi LLDs to notify userspace that their HW has
++ * created a session or an existing session is back in the logged in state.
++ **/
++int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
++{
++ struct iscsi_internal *priv;
++ struct iscsi_cls_session *session;
++ struct Scsi_Host *shost;
++ struct iscsi_uevent *ev;
++ struct sk_buff *skb;
++ struct nlmsghdr *nlh;
++ unsigned long flags;
++ int rc, len = NLMSG_SPACE(sizeof(*ev));
++
++ priv = iscsi_if_transport_lookup(conn->transport);
++ if (!priv)
+ return -EINVAL;
++
++ session = iscsi_dev_to_session(conn->dev.parent);
++ shost = iscsi_session_to_shost(session);
++
++ skb = alloc_skb(len, GFP_KERNEL);
++ if (!skb) {
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event\n");
++ return -ENOMEM;
+ }
+
++ nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
++ ev = NLMSG_DATA(nlh);
++ ev->transport_handle = iscsi_handle(conn->transport);
++ ev->type = ISCSI_UEVENT_CREATE_SESSION;
++ ev->r.c_session_ret.host_no = shost->host_no;
++ ev->r.c_session_ret.sid = session->sid;
++
+ /*
+ * this will occur if the daemon is not up, so we just warn
+ * the user and when the daemon is restarted it will handle it
+ */
+ rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+ if (rc < 0)
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "Cannot notify userspace of session "
+- "event %u. Check iscsi daemon\n",
+- event);
++ dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
++ "session creation event. Check iscsi daemon\n");
++
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
+ return rc;
+ }
+-EXPORT_SYMBOL_GPL(iscsi_session_event);
++EXPORT_SYMBOL_GPL(iscsi_if_create_session_done);
+
+ static int
+-iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
+- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
+- uint16_t cmds_max, uint16_t queue_depth)
++iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
+ {
+ struct iscsi_transport *transport = priv->iscsi_transport;
+ struct iscsi_cls_session *session;
+- uint32_t host_no;
++ unsigned long flags;
++ uint32_t hostno;
+
+- session = transport->create_session(ep, cmds_max, queue_depth,
+- initial_cmdsn, &host_no);
++ session = transport->create_session(transport, &priv->t,
++ ev->u.c_session.initial_cmdsn,
++ &hostno);
+ if (!session)
+ return -ENOMEM;
+
+- ev->r.c_session_ret.host_no = host_no;
++ spin_lock_irqsave(&sesslock, flags);
++ list_add(&session->sess_list, &sesslist);
++ spin_unlock_irqrestore(&sesslock, flags);
++
++ ev->r.c_session_ret.host_no = hostno;
+ ev->r.c_session_ret.sid = session->sid;
+ return 0;
+ }
+@@ -1242,34 +835,47 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
+ struct iscsi_cls_conn *conn;
+ struct iscsi_cls_session *session;
++ unsigned long flags;
+
+ session = iscsi_session_lookup(ev->u.c_conn.sid);
+ if (!session) {
+- printk(KERN_ERR "iscsi: invalid session %d.\n",
++ printk(KERN_ERR "iscsi: invalid session %d\n",
+ ev->u.c_conn.sid);
+ return -EINVAL;
+ }
+
+ conn = transport->create_conn(session, ev->u.c_conn.cid);
+ if (!conn) {
+- iscsi_cls_session_printk(KERN_ERR, session,
+- "couldn't create a new connection.");
++ printk(KERN_ERR "iscsi: couldn't create a new "
++ "connection for session %d\n",
++ session->sid);
+ return -ENOMEM;
+ }
+
+ ev->r.c_conn_ret.sid = session->sid;
+ ev->r.c_conn_ret.cid = conn->cid;
++
++ spin_lock_irqsave(&connlock, flags);
++ list_add(&conn->conn_list, &connlist);
++ conn->active = 1;
++ spin_unlock_irqrestore(&connlock, flags);
++
+ return 0;
+ }
+
+ static int
+ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+ {
++ unsigned long flags;
+ struct iscsi_cls_conn *conn;
+
+ conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
+ if (!conn)
+ return -EINVAL;
++ spin_lock_irqsave(&connlock, flags);
++ conn->active = 0;
++ list_del(&conn->conn_list);
++ spin_unlock_irqrestore(&connlock, flags);
+
+ if (transport->destroy_conn)
+ transport->destroy_conn(conn);
+@@ -1307,7 +913,6 @@ static int
+ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev, int msg_type)
+ {
+- struct iscsi_endpoint *ep;
+ struct sockaddr *dst_addr;
+ int rc = 0;
+
+@@ -1317,33 +922,22 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
+ return -EINVAL;
+
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- ep = transport->ep_connect(dst_addr,
+- ev->u.ep_connect.non_blocking);
+- if (IS_ERR(ep))
+- return PTR_ERR(ep);
+-
+- ev->r.ep_connect_ret.handle = ep->id;
++ rc = transport->ep_connect(dst_addr,
++ ev->u.ep_connect.non_blocking,
++ &ev->r.ep_connect_ret.handle);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_POLL:
+ if (!transport->ep_poll)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_poll.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- ev->r.retcode = transport->ep_poll(ep,
++ ev->r.retcode = transport->ep_poll(ev->u.ep_poll.ep_handle,
+ ev->u.ep_poll.timeout_ms);
+ break;
+ case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
+- if (!ep)
+- return -EINVAL;
+-
+- transport->ep_disconnect(ep);
++ transport->ep_disconnect(ev->u.ep_disconnect.ep_handle);
+ break;
+ }
+ return rc;
+@@ -1353,50 +947,15 @@ static int
+ iscsi_tgt_dscvr(struct iscsi_transport *transport,
+ struct iscsi_uevent *ev)
+ {
+- struct Scsi_Host *shost;
+ struct sockaddr *dst_addr;
+- int err;
+
+ if (!transport->tgt_dscvr)
+ return -EINVAL;
+
+- shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "target discovery could not find host no %u\n",
+- ev->u.tgt_dscvr.host_no);
+- return -ENODEV;
+- }
+-
+-
+ dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+- err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
+- ev->u.tgt_dscvr.enable, dst_addr);
+- scsi_host_put(shost);
+- return err;
+-}
+-
+-static int
+-iscsi_set_host_param(struct iscsi_transport *transport,
+- struct iscsi_uevent *ev)
+-{
+- char *data = (char*)ev + sizeof(*ev);
+- struct Scsi_Host *shost;
+- int err;
+-
+- if (!transport->set_host_param)
+- return -ENOSYS;
+-
+- shost = scsi_host_lookup(ev->u.set_host_param.host_no);
+- if (IS_ERR(shost)) {
+- printk(KERN_ERR "set_host_param could not find host no %u\n",
+- ev->u.set_host_param.host_no);
+- return -ENODEV;
+- }
+-
+- err = transport->set_host_param(shost, ev->u.set_host_param.param,
+- data, ev->u.set_host_param.len);
+- scsi_host_put(shost);
+- return err;
++ return transport->tgt_dscvr(ev->u.tgt_dscvr.type,
++ ev->u.tgt_dscvr.host_no,
++ ev->u.tgt_dscvr.enable, dst_addr);
+ }
+
+ static int
+@@ -1408,7 +967,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ struct iscsi_internal *priv;
+ struct iscsi_cls_session *session;
+ struct iscsi_cls_conn *conn;
+- struct iscsi_endpoint *ep = NULL;
++ unsigned long flags;
+
+ priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
+ if (!priv)
+@@ -1422,35 +981,17 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+
+ switch (nlh->nlmsg_type) {
+ case ISCSI_UEVENT_CREATE_SESSION:
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_session.initial_cmdsn,
+- ev->u.c_session.cmds_max,
+- ev->u.c_session.queue_depth);
+- break;
+- case ISCSI_UEVENT_CREATE_BOUND_SESSION:
+- ep = iscsi_lookup_endpoint(ev->u.c_bound_session.ep_handle);
+- if (!ep) {
+- err = -EINVAL;
+- break;
+- }
+-
+- err = iscsi_if_create_session(priv, ep, ev,
+- ev->u.c_bound_session.initial_cmdsn,
+- ev->u.c_bound_session.cmds_max,
+- ev->u.c_bound_session.queue_depth);
++ err = iscsi_if_create_session(priv, ev);
+ break;
+ case ISCSI_UEVENT_DESTROY_SESSION:
+ session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
++ if (session) {
++ spin_lock_irqsave(&sesslock, flags);
++ list_del(&session->sess_list);
++ spin_unlock_irqrestore(&sesslock, flags);
++
+ transport->destroy_session(session);
+- else
+- err = -EINVAL;
+- break;
+- case ISCSI_UEVENT_UNBIND_SESSION:
+- session = iscsi_session_lookup(ev->u.d_session.sid);
+- if (session)
+- iscsi_unbind_session(session);
+- else
++ } else
+ err = -EINVAL;
+ break;
+ case ISCSI_UEVENT_CREATE_CONN:
+@@ -1508,11 +1049,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ case ISCSI_UEVENT_TGT_DSCVR:
+ err = iscsi_tgt_dscvr(transport, ev);
+ break;
+- case ISCSI_UEVENT_SET_HOST_PARAM:
+- err = iscsi_set_host_param(transport, ev);
+- break;
+ default:
+- err = -ENOSYS;
++ err = -EINVAL;
+ break;
+ }
+
+@@ -1521,55 +1059,70 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ }
+
+ /*
+- * Get message from skb. Each message is processed by iscsi_if_recv_msg.
+- * Malformed skbs with wrong lengths or invalid creds are not processed.
++ * Get message from skb (based on rtnetlink_rcv_skb). Each message is
++ * processed by iscsi_if_recv_msg. Malformed skbs with wrong lengths or
++ * invalid creds are discarded silently.
+ */
+ static void
+-iscsi_if_rx(struct sk_buff *skb)
++iscsi_if_rx(struct sock *sk, int len)
+ {
++ struct sk_buff *skb;
++
+ mutex_lock(&rx_queue_mutex);
+- while (skb->len >= NLMSG_SPACE(0)) {
+- int err;
+- uint32_t rlen;
+- struct nlmsghdr *nlh;
+- struct iscsi_uevent *ev;
+-
+- nlh = nlmsg_hdr(skb);
+- if (nlh->nlmsg_len < sizeof(*nlh) ||
+- skb->len < nlh->nlmsg_len) {
+- break;
++ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
++ if (NETLINK_CREDS(skb)->uid) {
++ skb_pull(skb, skb->len);
++ goto free_skb;
+ }
+
+- ev = NLMSG_DATA(nlh);
+- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
+- if (rlen > skb->len)
+- rlen = skb->len;
++ while (skb->len >= NLMSG_SPACE(0)) {
++ int err;
++ uint32_t rlen;
++ struct nlmsghdr *nlh;
++ struct iscsi_uevent *ev;
+
+- err = iscsi_if_recv_msg(skb, nlh);
+- if (err) {
+- ev->type = ISCSI_KEVENT_IF_ERROR;
+- ev->iferror = err;
+- }
+- do {
+- /*
+- * special case for GET_STATS:
+- * on success - sending reply and stats from
+- * inside of if_recv_msg(),
+- * on error - fall through.
+- */
+- if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ nlh = nlmsg_hdr(skb);
++ if (nlh->nlmsg_len < sizeof(*nlh) ||
++ skb->len < nlh->nlmsg_len) {
+ break;
+- err = iscsi_if_send_reply(
+- NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+- nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+- } while (err < 0 && err != -ECONNREFUSED);
+- skb_pull(skb, rlen);
++ }
++
++ ev = NLMSG_DATA(nlh);
++ rlen = NLMSG_ALIGN(nlh->nlmsg_len);
++ if (rlen > skb->len)
++ rlen = skb->len;
++
++ err = iscsi_if_recv_msg(skb, nlh);
++ if (err) {
++ ev->type = ISCSI_KEVENT_IF_ERROR;
++ ev->iferror = err;
++ }
++ do {
++ /*
++ * special case for GET_STATS:
++ * on success - sending reply and stats from
++ * inside of if_recv_msg(),
++ * on error - fall through.
++ */
++ if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
++ break;
++ err = iscsi_if_send_reply(
++ NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
++ nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++ } while (err < 0 && err != -ECONNREFUSED);
++ skb_pull(skb, rlen);
++ }
++free_skb:
++ kfree_skb(skb);
+ }
+ mutex_unlock(&rx_queue_mutex);
+ }
+
++#define iscsi_cdev_to_conn(_cdev) \
++ iscsi_dev_to_conn(_cdev->dev)
++
+ #define ISCSI_CLASS_ATTR(_prefix,_name,_mode,_show,_store) \
+-struct device_attribute dev_attr_##_prefix##_##_name = \
++struct class_device_attribute class_device_attr_##_prefix##_##_name = \
+ __ATTR(_name,_mode,_show,_store)
+
+ /*
+@@ -1577,10 +1130,9 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
+ */
+ #define iscsi_conn_attr_show(param) \
+ static ssize_t \
+-show_conn_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_conn_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
++ struct iscsi_cls_conn *conn = iscsi_cdev_to_conn(cdev); \
+ struct iscsi_transport *t = conn->transport; \
+ return t->get_conn_param(conn, param, buf); \
+ }
+@@ -1601,68 +1153,43 @@ iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
+ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
+ iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
+ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+-iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
+-iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
++
++#define iscsi_cdev_to_session(_cdev) \
++ iscsi_dev_to_session(_cdev->dev)
+
+ /*
+ * iSCSI session attrs
+ */
+-#define iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr_show(param) \
+ static ssize_t \
+-show_session_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_session_param_##param(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
+ struct iscsi_transport *t = session->transport; \
+- \
+- if (perm && !capable(CAP_SYS_ADMIN)) \
+- return -EACCES; \
+ return t->get_session_param(session, param, buf); \
+ }
+
+-#define iscsi_session_attr(field, param, perm) \
+- iscsi_session_attr_show(param, perm) \
++#define iscsi_session_attr(field, param) \
++ iscsi_session_attr_show(param) \
+ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
+ NULL);
+
+-iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
+-iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
+-iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
+-iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
+-iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
+-iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
+-iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
+-iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
+-iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
+-iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
+-iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
+-iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
+-iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
+-iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+-iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
+-iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
+-iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+-iscsi_session_attr(ifacename, ISCSI_PARAM_IFACE_NAME, 0);
+-iscsi_session_attr(initiatorname, ISCSI_PARAM_INITIATOR_NAME, 0)
+-
+-static ssize_t
+-show_priv_session_state(struct device *dev, struct device_attribute *attr,
+- char *buf)
+-{
+- struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+- return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+-}
+-static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+- NULL);
++iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME);
++iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN);
++iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T);
++iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN);
++iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST);
++iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST);
++iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN);
++iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN);
++iscsi_session_attr(erl, ISCSI_PARAM_ERL);
++iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT);
+
+ #define iscsi_priv_session_attr_show(field, format) \
+ static ssize_t \
+-show_priv_session_##field(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
++show_priv_session_##field(struct class_device *cdev, char *buf) \
+ { \
+- struct iscsi_cls_session *session = \
+- iscsi_dev_to_session(dev->parent); \
++ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);\
+ return sprintf(buf, format"\n", session->field); \
+ }
+
+@@ -1672,32 +1199,9 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
+ NULL)
+ iscsi_priv_session_attr(recovery_tmo, "%d");
+
+-/*
+- * iSCSI host attrs
+- */
+-#define iscsi_host_attr_show(param) \
+-static ssize_t \
+-show_host_param_##param(struct device *dev, \
+- struct device_attribute *attr, char *buf) \
+-{ \
+- struct Scsi_Host *shost = transport_class_to_shost(dev); \
+- struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
+- return priv->iscsi_transport->get_host_param(shost, param, buf); \
+-}
+-
+-#define iscsi_host_attr(field, param) \
+- iscsi_host_attr_show(param) \
+-static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
+- NULL);
+-
+-iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
+-iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
+-iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
+-iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+-
+ #define SETUP_PRIV_SESSION_RD_ATTR(field) \
+ do { \
+- priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
+ count++; \
+ } while (0)
+
+@@ -1705,7 +1209,7 @@ do { \
+ #define SETUP_SESSION_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->session_attrs[count] = &dev_attr_sess_##field; \
++ priv->session_attrs[count] = &class_device_attr_sess_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1713,15 +1217,7 @@ do { \
+ #define SETUP_CONN_RD_ATTR(field, param_flag) \
+ do { \
+ if (tt->param_mask & param_flag) { \
+- priv->conn_attrs[count] = &dev_attr_conn_##field; \
+- count++; \
+- } \
+-} while (0)
+-
+-#define SETUP_HOST_RD_ATTR(field, param_flag) \
+-do { \
+- if (tt->host_param_mask & param_flag) { \
+- priv->host_attrs[count] = &dev_attr_host_##field; \
++ priv->conn_attrs[count] = &class_device_attr_conn_##field; \
+ count++; \
+ } \
+ } while (0)
+@@ -1811,34 +1307,25 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+ priv->t.user_scan = iscsi_user_scan;
+- if (!(tt->caps & CAP_DATA_PATH_OFFLOAD))
+- priv->t.create_work_queue = 1;
+
+- priv->dev.class = &iscsi_transport_class;
+- snprintf(priv->dev.bus_id, BUS_ID_SIZE, "%s", tt->name);
+- err = device_register(&priv->dev);
++ priv->cdev.class = &iscsi_transport_class;
++ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
++ err = class_device_register(&priv->cdev);
+ if (err)
+ goto free_priv;
+
+- err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group);
++ err = sysfs_create_group(&priv->cdev.kobj, &iscsi_transport_group);
+ if (err)
+- goto unregister_dev;
++ goto unregister_cdev;
+
+ /* host parameters */
+ priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+ priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+ priv->t.host_attrs.ac.match = iscsi_host_match;
+- priv->t.host_size = sizeof(struct iscsi_cls_host);
++ priv->t.host_size = sizeof(struct iscsi_host);
++ priv->host_attrs[0] = NULL;
+ transport_container_register(&priv->t.host_attrs);
+
+- SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
+- SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
+- SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
+- SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
+- BUG_ON(count > ISCSI_HOST_ATTRS);
+- priv->host_attrs[count] = NULL;
+- count = 0;
+-
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+ priv->conn_cont.ac.class = &iscsi_connection_class.class;
+@@ -1856,8 +1343,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
+ SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
+ SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
+- SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
+- SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
+
+ BUG_ON(count > ISCSI_CONN_ATTRS);
+ priv->conn_attrs[count] = NULL;
+@@ -1879,17 +1364,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
+ SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
+ SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
+- SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
+- SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
+- SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
+- SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
+- SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
+- SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
+- SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
+- SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
+- SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
+ SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+- SETUP_PRIV_SESSION_RD_ATTR(state);
+
+ BUG_ON(count > ISCSI_SESSION_ATTRS);
+ priv->session_attrs[count] = NULL;
+@@ -1901,9 +1376,8 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ printk(KERN_NOTICE "iscsi: registered transport (%s)\n", tt->name);
+ return &priv->t;
+
+-unregister_dev:
+- device_unregister(&priv->dev);
+- return NULL;
++unregister_cdev:
++ class_device_unregister(&priv->cdev);
+ free_priv:
+ kfree(priv);
+ return NULL;
+@@ -1930,8 +1404,8 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+ transport_container_unregister(&priv->session_cont);
+ transport_container_unregister(&priv->t.host_attrs);
+
+- sysfs_remove_group(&priv->dev.kobj, &iscsi_transport_group);
+- device_unregister(&priv->dev);
++ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
++ class_device_unregister(&priv->cdev);
+ mutex_unlock(&rx_queue_mutex);
+
+ return 0;
+@@ -1951,13 +1425,9 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ return err;
+
+- err = class_register(&iscsi_endpoint_class);
+- if (err)
+- goto unregister_transport_class;
+-
+ err = transport_class_register(&iscsi_host_class);
+ if (err)
+- goto unregister_endpoint_class;
++ goto unregister_transport_class;
+
+ err = transport_class_register(&iscsi_connection_class);
+ if (err)
+@@ -1967,29 +1437,21 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
+- NULL, THIS_MODULE);
++ nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+ goto unregister_session_class;
+ }
+
+- iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
+- if (!iscsi_eh_timer_workq)
+- goto release_nls;
+-
+ return 0;
+
+-release_nls:
+- netlink_kernel_release(nls);
+ unregister_session_class:
+ transport_class_unregister(&iscsi_session_class);
+ unregister_conn_class:
+ transport_class_unregister(&iscsi_connection_class);
+ unregister_host_class:
+ transport_class_unregister(&iscsi_host_class);
+-unregister_endpoint_class:
+- class_unregister(&iscsi_endpoint_class);
+ unregister_transport_class:
+ class_unregister(&iscsi_transport_class);
+ return err;
+@@ -1997,12 +1459,10 @@ unregister_transport_class:
+
+ static void __exit iscsi_transport_exit(void)
+ {
+- destroy_workqueue(iscsi_eh_timer_workq);
+- netlink_kernel_release(nls);
++ sock_release(nls->sk_socket);
+ transport_class_unregister(&iscsi_connection_class);
+ transport_class_unregister(&iscsi_session_class);
+ transport_class_unregister(&iscsi_host_class);
+- class_unregister(&iscsi_endpoint_class);
+ class_unregister(&iscsi_transport_class);
+ }
+
+diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
+index 16be12f..55ebf03 100644
+--- a/include/scsi/iscsi_if.h
++++ b/include/scsi/iscsi_if.h
+@@ -48,17 +48,12 @@ enum iscsi_uevent_e {
+ ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT = UEVENT_BASE + 14,
+
+ ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
+- ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
+- ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
+- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+
+ /* up events */
+ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
+ ISCSI_KEVENT_CONN_ERROR = KEVENT_BASE + 2,
+ ISCSI_KEVENT_IF_ERROR = KEVENT_BASE + 3,
+ ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
+- ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
+- ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
+ };
+
+ enum iscsi_tgt_dscvr {
+@@ -76,15 +71,7 @@ struct iscsi_uevent {
+ /* messages u -> k */
+ struct msg_create_session {
+ uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+ } c_session;
+- struct msg_create_bound_session {
+- uint64_t ep_handle;
+- uint32_t initial_cmdsn;
+- uint16_t cmds_max;
+- uint16_t queue_depth;
+- } c_bound_session;
+ struct msg_destroy_session {
+ uint32_t sid;
+ } d_session;
+@@ -149,11 +136,6 @@ struct iscsi_uevent {
+ */
+ uint32_t enable;
+ } tgt_dscvr;
+- struct msg_set_host_param {
+- uint32_t host_no;
+- uint32_t param; /* enum iscsi_host_param */
+- uint32_t len;
+- } set_host_param;
+ } u;
+ union {
+ /* messages k -> u */
+@@ -166,10 +148,6 @@ struct iscsi_uevent {
+ uint32_t sid;
+ uint32_t cid;
+ } c_conn_ret;
+- struct msg_unbind_session {
+- uint32_t sid;
+- uint32_t host_no;
+- } unbind_session;
+ struct msg_recv_req {
+ uint32_t sid;
+ uint32_t cid;
+@@ -245,78 +223,42 @@ enum iscsi_param {
+ ISCSI_PARAM_CONN_PORT,
+ ISCSI_PARAM_CONN_ADDRESS,
+
+- ISCSI_PARAM_USERNAME,
+- ISCSI_PARAM_USERNAME_IN,
+- ISCSI_PARAM_PASSWORD,
+- ISCSI_PARAM_PASSWORD_IN,
+-
+- ISCSI_PARAM_FAST_ABORT,
+- ISCSI_PARAM_ABORT_TMO,
+- ISCSI_PARAM_LU_RESET_TMO,
+- ISCSI_PARAM_HOST_RESET_TMO,
+-
+- ISCSI_PARAM_PING_TMO,
+- ISCSI_PARAM_RECV_TMO,
+-
+- ISCSI_PARAM_IFACE_NAME,
+- ISCSI_PARAM_ISID,
+- ISCSI_PARAM_INITIATOR_NAME,
+ /* must always be last */
+ ISCSI_PARAM_MAX,
+ };
+
+-#define ISCSI_MAX_RECV_DLENGTH (1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
+-#define ISCSI_MAX_XMIT_DLENGTH (1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
+-#define ISCSI_HDRDGST_EN (1ULL << ISCSI_PARAM_HDRDGST_EN)
+-#define ISCSI_DATADGST_EN (1ULL << ISCSI_PARAM_DATADGST_EN)
+-#define ISCSI_INITIAL_R2T_EN (1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
+-#define ISCSI_MAX_R2T (1ULL << ISCSI_PARAM_MAX_R2T)
+-#define ISCSI_IMM_DATA_EN (1ULL << ISCSI_PARAM_IMM_DATA_EN)
+-#define ISCSI_FIRST_BURST (1ULL << ISCSI_PARAM_FIRST_BURST)
+-#define ISCSI_MAX_BURST (1ULL << ISCSI_PARAM_MAX_BURST)
+-#define ISCSI_PDU_INORDER_EN (1ULL << ISCSI_PARAM_PDU_INORDER_EN)
+-#define ISCSI_DATASEQ_INORDER_EN (1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
+-#define ISCSI_ERL (1ULL << ISCSI_PARAM_ERL)
+-#define ISCSI_IFMARKER_EN (1ULL << ISCSI_PARAM_IFMARKER_EN)
+-#define ISCSI_OFMARKER_EN (1ULL << ISCSI_PARAM_OFMARKER_EN)
+-#define ISCSI_EXP_STATSN (1ULL << ISCSI_PARAM_EXP_STATSN)
+-#define ISCSI_TARGET_NAME (1ULL << ISCSI_PARAM_TARGET_NAME)
+-#define ISCSI_TPGT (1ULL << ISCSI_PARAM_TPGT)
+-#define ISCSI_PERSISTENT_ADDRESS (1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
+-#define ISCSI_PERSISTENT_PORT (1ULL << ISCSI_PARAM_PERSISTENT_PORT)
+-#define ISCSI_SESS_RECOVERY_TMO (1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
+-#define ISCSI_CONN_PORT (1ULL << ISCSI_PARAM_CONN_PORT)
+-#define ISCSI_CONN_ADDRESS (1ULL << ISCSI_PARAM_CONN_ADDRESS)
+-#define ISCSI_USERNAME (1ULL << ISCSI_PARAM_USERNAME)
+-#define ISCSI_USERNAME_IN (1ULL << ISCSI_PARAM_USERNAME_IN)
+-#define ISCSI_PASSWORD (1ULL << ISCSI_PARAM_PASSWORD)
+-#define ISCSI_PASSWORD_IN (1ULL << ISCSI_PARAM_PASSWORD_IN)
+-#define ISCSI_FAST_ABORT (1ULL << ISCSI_PARAM_FAST_ABORT)
+-#define ISCSI_ABORT_TMO (1ULL << ISCSI_PARAM_ABORT_TMO)
+-#define ISCSI_LU_RESET_TMO (1ULL << ISCSI_PARAM_LU_RESET_TMO)
+-#define ISCSI_HOST_RESET_TMO (1ULL << ISCSI_PARAM_HOST_RESET_TMO)
+-#define ISCSI_PING_TMO (1ULL << ISCSI_PARAM_PING_TMO)
+-#define ISCSI_RECV_TMO (1ULL << ISCSI_PARAM_RECV_TMO)
+-#define ISCSI_IFACE_NAME (1ULL << ISCSI_PARAM_IFACE_NAME)
+-#define ISCSI_ISID (1ULL << ISCSI_PARAM_ISID)
+-#define ISCSI_INITIATOR_NAME (1ULL << ISCSI_PARAM_INITIATOR_NAME)
+-
+-/* iSCSI HBA params */
+-enum iscsi_host_param {
+- ISCSI_HOST_PARAM_HWADDRESS,
+- ISCSI_HOST_PARAM_INITIATOR_NAME,
+- ISCSI_HOST_PARAM_NETDEV_NAME,
+- ISCSI_HOST_PARAM_IPADDRESS,
+- ISCSI_HOST_PARAM_MAX,
+-};
+-
+-#define ISCSI_HOST_HWADDRESS (1ULL << ISCSI_HOST_PARAM_HWADDRESS)
+-#define ISCSI_HOST_INITIATOR_NAME (1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
+-#define ISCSI_HOST_NETDEV_NAME (1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
+-#define ISCSI_HOST_IPADDRESS (1ULL << ISCSI_HOST_PARAM_IPADDRESS)
++#define ISCSI_MAX_RECV_DLENGTH (1 << ISCSI_PARAM_MAX_RECV_DLENGTH)
++#define ISCSI_MAX_XMIT_DLENGTH (1 << ISCSI_PARAM_MAX_XMIT_DLENGTH)
++#define ISCSI_HDRDGST_EN (1 << ISCSI_PARAM_HDRDGST_EN)
++#define ISCSI_DATADGST_EN (1 << ISCSI_PARAM_DATADGST_EN)
++#define ISCSI_INITIAL_R2T_EN (1 << ISCSI_PARAM_INITIAL_R2T_EN)
++#define ISCSI_MAX_R2T (1 << ISCSI_PARAM_MAX_R2T)
++#define ISCSI_IMM_DATA_EN (1 << ISCSI_PARAM_IMM_DATA_EN)
++#define ISCSI_FIRST_BURST (1 << ISCSI_PARAM_FIRST_BURST)
++#define ISCSI_MAX_BURST (1 << ISCSI_PARAM_MAX_BURST)
++#define ISCSI_PDU_INORDER_EN (1 << ISCSI_PARAM_PDU_INORDER_EN)
++#define ISCSI_DATASEQ_INORDER_EN (1 << ISCSI_PARAM_DATASEQ_INORDER_EN)
++#define ISCSI_ERL (1 << ISCSI_PARAM_ERL)
++#define ISCSI_IFMARKER_EN (1 << ISCSI_PARAM_IFMARKER_EN)
++#define ISCSI_OFMARKER_EN (1 << ISCSI_PARAM_OFMARKER_EN)
++#define ISCSI_EXP_STATSN (1 << ISCSI_PARAM_EXP_STATSN)
++#define ISCSI_TARGET_NAME (1 << ISCSI_PARAM_TARGET_NAME)
++#define ISCSI_TPGT (1 << ISCSI_PARAM_TPGT)
++#define ISCSI_PERSISTENT_ADDRESS (1 << ISCSI_PARAM_PERSISTENT_ADDRESS)
++#define ISCSI_PERSISTENT_PORT (1 << ISCSI_PARAM_PERSISTENT_PORT)
++#define ISCSI_SESS_RECOVERY_TMO (1 << ISCSI_PARAM_SESS_RECOVERY_TMO)
++#define ISCSI_CONN_PORT (1 << ISCSI_PARAM_CONN_PORT)
++#define ISCSI_CONN_ADDRESS (1 << ISCSI_PARAM_CONN_ADDRESS)
+
+ #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
+ #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
++#define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
++
++/**
++ * iscsi_hostdata - get LLD hostdata from scsi_host
++ * @_hostdata: pointer to scsi host's hostdata
++ **/
++#define iscsi_hostdata(_hostdata) ((void*)_hostdata + sizeof(unsigned long))
+
+ /*
+ * These flags presents iSCSI Data-Path capabilities.
+@@ -330,9 +272,6 @@ enum iscsi_host_param {
+ #define CAP_MULTI_CONN 0x40
+ #define CAP_TEXT_NEGO 0x80
+ #define CAP_MARKERS 0x100
+-#define CAP_FW_DB 0x200
+-#define CAP_SENDTARGETS_OFFLOAD 0x400
+-#define CAP_DATA_PATH_OFFLOAD 0x800
+
+ /*
+ * These flags describes reason of stop_conn() call
+diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
+index f2a2c11..8d1e4e8 100644
+--- a/include/scsi/iscsi_proto.h
++++ b/include/scsi/iscsi_proto.h
+@@ -21,16 +21,13 @@
+ #ifndef ISCSI_PROTO_H
+ #define ISCSI_PROTO_H
+
+-#include <linux/types.h>
+-#include <scsi/scsi.h>
+-
+ #define ISCSI_DRAFT20_VERSION 0x00
+
+ /* default iSCSI listen port for incoming connections */
+ #define ISCSI_LISTEN_PORT 3260
+
+ /* Padding word length */
+-#define ISCSI_PAD_LEN 4
++#define PAD_WORD_LEN 4
+
+ /*
+ * useful common(control and data pathes) macro
+@@ -46,8 +43,8 @@
+ /* initiator tags; opaque for target */
+ typedef uint32_t __bitwise__ itt_t;
+ /* below makes sense only for initiator that created this tag */
+-#define build_itt(itt, age) ((__force itt_t)\
+- ((itt) | ((age) << ISCSI_AGE_SHIFT)))
++#define build_itt(itt, id, age) ((__force itt_t)\
++ ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
+ #define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
+ #define RESERVED_ITT ((__force itt_t)0xffffffff)
+
+@@ -113,7 +110,6 @@ struct iscsi_ahs_hdr {
+
+ #define ISCSI_AHSTYPE_CDB 1
+ #define ISCSI_AHSTYPE_RLENGTH 2
+-#define ISCSI_CDB_SIZE 16
+
+ /* iSCSI PDU Header */
+ struct iscsi_cmd {
+@@ -127,7 +123,7 @@ struct iscsi_cmd {
+ __be32 data_length;
+ __be32 cmdsn;
+ __be32 exp_statsn;
+- uint8_t cdb[ISCSI_CDB_SIZE]; /* SCSI Command Block */
++ uint8_t cdb[16]; /* SCSI Command Block */
+ /* Additional Data (Command Dependent) */
+ };
+
+@@ -151,15 +147,6 @@ struct iscsi_rlength_ahdr {
+ __be32 read_length;
+ };
+
+-/* Extended CDB AHS */
+-struct iscsi_ecdb_ahdr {
+- __be16 ahslength; /* CDB length - 15, including reserved byte */
+- uint8_t ahstype;
+- uint8_t reserved;
+- /* 4-byte aligned extended CDB spillover */
+- uint8_t ecdb[SCSI_MAX_VARLEN_CDB_SIZE - ISCSI_CDB_SIZE];
+-};
+-
+ /* SCSI Response Header */
+ struct iscsi_cmd_rsp {
+ uint8_t opcode;
+@@ -613,8 +600,6 @@ struct iscsi_reject {
+ #define ISCSI_MIN_MAX_BURST_LEN 512
+ #define ISCSI_MAX_MAX_BURST_LEN 16777215
+
+-#define ISCSI_DEF_TIME2WAIT 2
+-
+ /************************* RFC 3720 End *****************************/
+
+ #endif /* ISCSI_PROTO_H */
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index 5e75bb7..ea0816d 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -24,7 +24,6 @@
+ #define LIBISCSI_H
+
+ #include <linux/types.h>
+-#include <linux/wait.h>
+ #include <linux/mutex.h>
+ #include <linux/timer.h>
+ #include <linux/workqueue.h>
+@@ -32,7 +31,6 @@
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+-struct scsi_host_template;
+ struct scsi_device;
+ struct Scsi_Host;
+ struct scsi_cmnd;
+@@ -42,7 +40,6 @@ struct iscsi_cls_session;
+ struct iscsi_cls_conn;
+ struct iscsi_session;
+ struct iscsi_nopin;
+-struct device;
+
+ /* #define DEBUG_SCSI */
+ #ifdef DEBUG_SCSI
+@@ -51,68 +48,69 @@ struct device;
+ #define debug_scsi(fmt...)
+ #endif
+
+-#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* must be power of 2 */
+-#define ISCSI_MGMT_CMDS_MAX 15
++#define ISCSI_XMIT_CMDS_MAX 128 /* must be power of 2 */
++#define ISCSI_MGMT_CMDS_MAX 32 /* must be power of 2 */
++#define ISCSI_CONN_MAX 1
++
++#define ISCSI_MGMT_ITT_OFFSET 0xa00
+
+ #define ISCSI_DEF_CMD_PER_LUN 32
+ #define ISCSI_MAX_CMD_PER_LUN 128
+
+ /* Task Mgmt states */
+-enum {
+- TMF_INITIAL,
+- TMF_QUEUED,
+- TMF_SUCCESS,
+- TMF_FAILED,
+- TMF_TIMEDOUT,
+- TMF_NOT_FOUND,
+-};
++#define TMABORT_INITIAL 0x0
++#define TMABORT_SUCCESS 0x1
++#define TMABORT_FAILED 0x2
++#define TMABORT_TIMEDOUT 0x3
++#define TMABORT_NOT_FOUND 0x4
+
+ /* Connection suspend "bit" */
+ #define ISCSI_SUSPEND_BIT 1
+
+-#define ISCSI_ITT_MASK (0x1fff)
+-#define ISCSI_TOTAL_CMDS_MAX 4096
+-/* this must be a power of two greater than ISCSI_MGMT_CMDS_MAX */
+-#define ISCSI_TOTAL_CMDS_MIN 16
++#define ISCSI_ITT_MASK (0xfff)
++#define ISCSI_CID_SHIFT 12
++#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
+ #define ISCSI_AGE_SHIFT 28
+ #define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
+
+-#define ISCSI_ADDRESS_BUF_LEN 64
+-
+-enum {
+- /* this is the maximum possible storage for AHSs */
+- ISCSI_MAX_AHS_SIZE = sizeof(struct iscsi_ecdb_ahdr) +
+- sizeof(struct iscsi_rlength_ahdr),
+- ISCSI_DIGEST_SIZE = sizeof(__u32),
++struct iscsi_mgmt_task {
++ /*
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be set up at session creation time.
++ */
++ struct iscsi_hdr *hdr;
++ char *data; /* mgmt payload */
++ int data_count; /* counts data to be sent */
++ uint32_t itt; /* this ITT */
++ void *dd_data; /* driver/transport data */
++ struct list_head running;
+ };
+
+-
+ enum {
+ ISCSI_TASK_COMPLETED,
+ ISCSI_TASK_PENDING,
+ ISCSI_TASK_RUNNING,
+ };
+
+-struct iscsi_task {
++struct iscsi_cmd_task {
+ /*
+- * Because LLDs allocate their hdr differently, this is a pointer
+- * and length to that storage. It must be setup at session
+- * creation time.
++ * Because LLDs allocate their hdr differently, this is a pointer to
++ * that storage. It must be set up at session creation time.
+ */
+ struct iscsi_cmd *hdr;
+- unsigned short hdr_max;
+- unsigned short hdr_len; /* accumulated size of hdr used */
+ int itt; /* this ITT */
++ int datasn; /* DataSN */
+
+ uint32_t unsol_datasn;
+- unsigned imm_count; /* imm-data (bytes) */
+- unsigned unsol_count; /* unsolicited (bytes)*/
++ int imm_count; /* imm-data (bytes) */
++ int unsol_count; /* unsolicited (bytes)*/
+ /* offset in unsolicited stream (bytes); */
+- unsigned unsol_offset;
+- unsigned data_count; /* remaining Data-Out */
+- char *data; /* mgmt payload */
++ int unsol_offset;
++ int data_count; /* remaining Data-Out */
+ struct scsi_cmnd *sc; /* associated SCSI cmd*/
++ int total_length;
+ struct iscsi_conn *conn; /* used connection */
++ struct iscsi_mgmt_task *mtask; /* tmf mtask in progr */
+
+ /* state set/tested under session->lock */
+ int state;
+@@ -121,33 +119,19 @@ struct iscsi_task {
+ void *dd_data; /* driver/transport data */
+ };
+
+-static inline void* iscsi_next_hdr(struct iscsi_task *task)
+-{
+- return (void*)task->hdr + task->hdr_len;
+-}
+-
+-/* Connection's states */
+-enum {
+- ISCSI_CONN_INITIAL_STAGE,
+- ISCSI_CONN_STARTED,
+- ISCSI_CONN_STOPPED,
+- ISCSI_CONN_CLEANUP_WAIT,
+-};
+-
+ struct iscsi_conn {
+ struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
+ void *dd_data; /* iscsi_transport data */
+ struct iscsi_session *session; /* parent session */
+ /*
++ * LLDs should set this lock. It protects the transport recv
++ * code
++ */
++ rwlock_t *recv_lock;
++ /*
+ * conn_stop() flag: stop to recover, stop to terminate
+ */
+ int stop_stage;
+- struct timer_list transport_timer;
+- unsigned long last_recv;
+- unsigned long last_ping;
+- int ping_timeout;
+- int recv_timeout;
+- struct iscsi_task *ping_task;
+
+ /* iSCSI connection-wide sequencing */
+ uint32_t exp_statsn;
+@@ -163,28 +147,35 @@ struct iscsi_conn {
+ * should always fit in this buffer
+ */
+ char *data;
+- struct iscsi_task *login_task; /* mtask used for login/text */
+- struct iscsi_task *task; /* xmit task in progress */
++ struct iscsi_mgmt_task *login_mtask; /* mtask used for login/text */
++ struct iscsi_mgmt_task *mtask; /* xmit mtask in progress */
++ struct iscsi_cmd_task *ctask; /* xmit ctask in progress */
+
+ /* xmit */
+- struct list_head mgmtqueue; /* mgmt (control) xmit queue */
++ struct kfifo *immqueue; /* immediate xmit queue */
++ struct kfifo *mgmtqueue; /* mgmt (control) xmit queue */
+ struct list_head mgmt_run_list; /* list of control tasks */
+ struct list_head xmitqueue; /* data-path cmd queue */
+ struct list_head run_list; /* list of cmds in progress */
+- struct list_head requeue; /* tasks needing another run */
+ struct work_struct xmitwork; /* per-conn. xmit workqueue */
++ /*
++ * serializes connection xmit, access to kfifos:
++ * xmitqueue, immqueue, mgmtqueue
++ */
++ struct mutex xmitmutex;
++
+ unsigned long suspend_tx; /* suspend Tx */
+ unsigned long suspend_rx; /* suspend Rx */
+
+ /* abort */
+ wait_queue_head_t ehwait; /* used in eh_abort() */
+ struct iscsi_tm tmhdr;
+- struct timer_list tmf_timer;
+- int tmf_state; /* see TMF_INITIAL, etc.*/
++ struct timer_list tmabort_timer;
++ int tmabort_state; /* see TMABORT_INITIAL, etc.*/
+
+ /* negotiated params */
+- unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
+- unsigned max_xmit_dlength; /* target_max_recv_dsl */
++ int max_recv_dlength; /* initiator_max_recv_dsl*/
++ int max_xmit_dlength; /* target_max_recv_dsl */
+ int hdrdgst_en;
+ int datadgst_en;
+ int ifmarker_en;
+@@ -192,9 +183,6 @@ struct iscsi_conn {
+ /* values userspace uses to id a conn */
+ int persistent_port;
+ char *persistent_address;
+- /* remote portal currently connected to */
+- int portal_port;
+- char portal_address[ISCSI_ADDRESS_BUF_LEN];
+
+ /* MIB-statistics */
+ uint64_t txdata_octets;
+@@ -209,65 +197,34 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
+- uint32_t fmr_unalign_cnt;
+ };
+
+-struct iscsi_pool {
++struct iscsi_queue {
+ struct kfifo *queue; /* FIFO Queue */
+ void **pool; /* Pool of elements */
+ int max; /* Max number of elements */
+ };
+
+-/* Session's states */
+-enum {
+- ISCSI_STATE_FREE = 1,
+- ISCSI_STATE_LOGGED_IN,
+- ISCSI_STATE_FAILED,
+- ISCSI_STATE_TERMINATE,
+- ISCSI_STATE_IN_RECOVERY,
+- ISCSI_STATE_RECOVERY_FAILED,
+- ISCSI_STATE_LOGGING_OUT,
+-};
+-
+ struct iscsi_session {
+- struct iscsi_cls_session *cls_session;
+- /*
+- * Syncs up the scsi eh thread with the iscsi eh thread when sending
+- * task management functions. This must be taken before the session
+- * and recv lock.
+- */
+- struct mutex eh_mutex;
+-
+ /* iSCSI session-wide sequencing */
+ uint32_t cmdsn;
+ uint32_t exp_cmdsn;
+ uint32_t max_cmdsn;
+
+- /* This tracks the reqs queued into the initiator */
+- uint32_t queued_cmdsn;
+-
+ /* configuration */
+- int abort_timeout;
+- int lu_reset_timeout;
+ int initial_r2t_en;
+- unsigned max_r2t;
++ int max_r2t;
+ int imm_data_en;
+- unsigned first_burst;
+- unsigned max_burst;
++ int first_burst;
++ int max_burst;
+ int time2wait;
+ int time2retain;
+ int pdu_inorder_en;
+ int dataseq_inorder_en;
+ int erl;
+- int fast_abort;
+ int tpgt;
+- char *username;
+- char *username_in;
+- char *password;
+- char *password_in;
+ char *targetname;
+- char *ifacename;
+- char *initiatorname;
++
+ /* control data */
+ struct iscsi_transport *tt;
+ struct Scsi_Host *host;
+@@ -281,20 +238,12 @@ struct iscsi_session {
+ int state; /* session state */
+ int age; /* counts session re-opens */
+
+- int scsi_cmds_max; /* max scsi commands */
+ int cmds_max; /* size of cmds array */
+- struct iscsi_task **cmds; /* Original Cmds arr */
+- struct iscsi_pool cmdpool; /* PDU's pool */
+-};
+-
+-struct iscsi_host {
+- char *initiatorname;
+- /* hw address or netdev iscsi connection is bound to */
+- char *hwaddress;
+- char *netdev;
+- /* local address */
+- int local_port;
+- char local_address[ISCSI_ADDRESS_BUF_LEN];
++ struct iscsi_cmd_task **cmds; /* Original Cmds arr */
++ struct iscsi_queue cmdpool; /* PDU's pool */
++ int mgmtpool_max; /* size of mgmt array */
++ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
++ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
+ };
+
+ /*
+@@ -303,48 +252,31 @@ struct iscsi_host {
+ extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth);
+ extern int iscsi_eh_abort(struct scsi_cmnd *sc);
+ extern int iscsi_eh_host_reset(struct scsi_cmnd *sc);
+-extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
+ extern int iscsi_queuecommand(struct scsi_cmnd *sc,
+ void (*done)(struct scsi_cmnd *));
+
+ /*
+- * iSCSI host helpers.
+- */
+-#define iscsi_host_priv(_shost) \
+- (shost_priv(_shost) + sizeof(struct iscsi_host))
+-
+-extern int iscsi_host_set_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+-extern int iscsi_host_get_param(struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+-extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev);
+-extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
+- int dd_data_size, uint16_t qdepth);
+-extern void iscsi_host_remove(struct Scsi_Host *shost);
+-extern void iscsi_host_free(struct Scsi_Host *shost);
+-
+-/*
+ * session management
+ */
+ extern struct iscsi_cls_session *
+-iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost,
+- uint16_t, int, uint32_t, unsigned int);
++iscsi_session_setup(struct iscsi_transport *, struct scsi_transport_template *,
++ int, int, uint32_t, uint32_t *);
+ extern void iscsi_session_teardown(struct iscsi_cls_session *);
++extern struct iscsi_session *class_to_transport_session(struct iscsi_cls_session *);
+ extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *);
+ extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen);
+ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
+ enum iscsi_param param, char *buf);
+
+-#define iscsi_session_printk(prefix, _sess, fmt, a...) \
+- iscsi_cls_session_printk(prefix, _sess->cls_session, fmt, ##a)
++#define session_to_cls(_sess) \
++ hostdata_session(_sess->host->hostdata)
+
+ /*
+ * connection management
+ */
+ extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session *,
+- int, uint32_t);
++ uint32_t);
+ extern void iscsi_conn_teardown(struct iscsi_cls_conn *);
+ extern int iscsi_conn_start(struct iscsi_cls_conn *);
+ extern void iscsi_conn_stop(struct iscsi_cls_conn *, int);
+@@ -353,17 +285,13 @@ extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *,
+ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
+ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf);
+-extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+-
+-#define iscsi_conn_printk(prefix, _c, fmt, a...) \
+- iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \
+- fmt, ##a)
+
+ /*
+ * pdu and task processing
+ */
+-extern void iscsi_update_cmdsn(struct iscsi_session *, struct iscsi_nopin *);
+-extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_task *,
++extern int iscsi_check_assign_cmdsn(struct iscsi_session *,
++ struct iscsi_nopin *);
++extern void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *,
+ struct iscsi_data *hdr);
+ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
+ char *, uint32_t);
+@@ -371,34 +299,13 @@ extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
+ char *, int);
+-extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
+-extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+-extern void iscsi_requeue_task(struct iscsi_task *task);
+-extern void iscsi_put_task(struct iscsi_task *task);
+-extern void __iscsi_get_task(struct iscsi_task *task);
++extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
++ uint32_t *);
+
+ /*
+ * generic helpers
+ */
+-extern void iscsi_pool_free(struct iscsi_pool *);
+-extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+-
+-/*
+- * inline functions to deal with padding.
+- */
+-static inline unsigned int
+-iscsi_padded(unsigned int len)
+-{
+- return (len + ISCSI_PAD_LEN - 1) & ~(ISCSI_PAD_LEN - 1);
+-}
+-
+-static inline unsigned int
+-iscsi_padding(unsigned int len)
+-{
+- len &= (ISCSI_PAD_LEN - 1);
+- if (len)
+- len = ISCSI_PAD_LEN - len;
+- return len;
+-}
++extern void iscsi_pool_free(struct iscsi_queue *, void **);
++extern int iscsi_pool_init(struct iscsi_queue *, int, void ***, int);
+
+ #endif
+diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
+index 8b6c91d..d5c218d 100644
+--- a/include/scsi/scsi_transport_iscsi.h
++++ b/include/scsi/scsi_transport_iscsi.h
+@@ -24,17 +24,15 @@
+ #define SCSI_TRANSPORT_ISCSI_H
+
+ #include <linux/device.h>
+-#include <linux/list.h>
+-#include <linux/mutex.h>
+ #include <scsi/iscsi_if.h>
+
+ struct scsi_transport_template;
+ struct iscsi_transport;
+-struct iscsi_endpoint;
+ struct Scsi_Host;
+ struct iscsi_cls_conn;
+ struct iscsi_conn;
+-struct iscsi_task;
++struct iscsi_cmd_task;
++struct iscsi_mgmt_task;
+ struct sockaddr;
+
+ /**
+@@ -58,22 +56,19 @@ struct sockaddr;
+ * @stop_conn: suspend/recover/terminate connection
+ * @send_pdu: send iSCSI PDU, Login, Logout, NOP-Out, Reject, Text.
+ * @session_recovery_timedout: notify LLD a block during recovery timed out
+- * @init_task: Initialize a iscsi_task and any internal structs.
+- * When offloading the data path, this is called from
+- * queuecommand with the session lock, or from the
+- * iscsi_conn_send_pdu context with the session lock.
+- * When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @xmit_task Requests LLD to transfer cmd task. Returns 0 or the
++ * @init_cmd_task: Initialize a iscsi_cmd_task and any internal structs.
++ * Called from queuecommand with session lock held.
++ * @init_mgmt_task: Initialize a iscsi_mgmt_task and any internal structs.
++ * Called from iscsi_conn_send_generic with xmitmutex.
++ * @xmit_cmd_task: Requests LLD to transfer cmd task. Returns 0 or the
+ * the number of bytes transferred on success, and -Exyz
+- * value on error. When offloading the data path, this
+- * is called from queuecommand with the session lock, or
+- * from the iscsi_conn_send_pdu context with the session
+- * lock. When not offloading the data path, this is called
+- * from the scsi work queue without the session lock.
+- * @cleanup_task: requests LLD to fail task. Called with session lock
+- * and after the connection has been suspended and
+- * terminated during recovery. If called
++ * value on error.
++ * @xmit_mgmt_task: Requests LLD to transfer mgmt task. Returns 0 or the
++ * the number of bytes transferred on success, and -Exyz
++ * value on error.
++ * @cleanup_cmd_task: requests LLD to fail cmd task. Called with xmitmutex
++ * and session->lock after the connection has been
++ * suspended and terminated during recovery. If called
+ * from abort task then connection is not suspended
+ * or terminated but sk_callback_lock is held
+ *
+@@ -84,11 +79,17 @@ struct iscsi_transport {
+ char *name;
+ unsigned int caps;
+ /* LLD sets this to indicate what values it can export to sysfs */
+- uint64_t param_mask;
+- uint64_t host_param_mask;
+- struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t sn, uint32_t *hn);
++ unsigned int param_mask;
++ struct scsi_host_template *host_template;
++ /* LLD connection data size */
++ int conndata_size;
++ /* LLD session data size */
++ int sessiondata_size;
++ int max_lun;
++ unsigned int max_conn;
++ unsigned int max_cmd_len;
++ struct iscsi_cls_session *(*create_session) (struct iscsi_transport *it,
++ struct scsi_transport_template *t, uint32_t sn, uint32_t *hn);
+ void (*destroy_session) (struct iscsi_cls_session *session);
+ struct iscsi_cls_conn *(*create_conn) (struct iscsi_cls_session *sess,
+ uint32_t cid);
+@@ -104,25 +105,26 @@ struct iscsi_transport {
+ enum iscsi_param param, char *buf);
+ int (*get_session_param) (struct iscsi_cls_session *session,
+ enum iscsi_param param, char *buf);
+- int (*get_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf);
+- int (*set_host_param) (struct Scsi_Host *shost,
+- enum iscsi_host_param param, char *buf,
+- int buflen);
+ int (*send_pdu) (struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+ void (*get_stats) (struct iscsi_cls_conn *conn,
+ struct iscsi_stats *stats);
+- int (*init_task) (struct iscsi_task *task);
+- int (*xmit_task) (struct iscsi_task *task);
+- void (*cleanup_task) (struct iscsi_conn *conn,
+- struct iscsi_task *task);
++ void (*init_cmd_task) (struct iscsi_cmd_task *ctask);
++ void (*init_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask,
++ char *data, uint32_t data_size);
++ int (*xmit_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ void (*cleanup_cmd_task) (struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
++ int (*xmit_mgmt_task) (struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+ void (*session_recovery_timedout) (struct iscsi_cls_session *session);
+- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+- int non_blocking);
+- int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
+- void (*ep_disconnect) (struct iscsi_endpoint *ep);
+- int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
++ int (*ep_connect) (struct sockaddr *dst_addr, int non_blocking,
++ uint64_t *ep_handle);
++ int (*ep_poll) (uint64_t ep_handle, int timeout_ms);
++ void (*ep_disconnect) (uint64_t ep_handle);
++ int (*tgt_dscvr) (enum iscsi_tgt_dscvr type, uint32_t host_no,
+ uint32_t enable, struct sockaddr *dst_addr);
+ };
+
+@@ -139,6 +141,13 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
+ extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ char *data, uint32_t data_size);
+
++
++/* Connection's states */
++#define ISCSI_CONN_INITIAL_STAGE 0
++#define ISCSI_CONN_STARTED 1
++#define ISCSI_CONN_STOPPED 2
++#define ISCSI_CONN_CLEANUP_WAIT 3
++
+ struct iscsi_cls_conn {
+ struct list_head conn_list; /* item in connlist */
+ void *dd_data; /* LLD private data */
+@@ -152,34 +161,25 @@ struct iscsi_cls_conn {
+ #define iscsi_dev_to_conn(_dev) \
+ container_of(_dev, struct iscsi_cls_conn, dev)
+
+-#define iscsi_conn_to_session(_conn) \
+- iscsi_dev_to_session(_conn->dev.parent)
+-
+-/* iscsi class session state */
+-enum {
+- ISCSI_SESSION_LOGGED_IN,
+- ISCSI_SESSION_FAILED,
+- ISCSI_SESSION_FREE,
+-};
+-
+-#define ISCSI_MAX_TARGET -1
++/* Session's states */
++#define ISCSI_STATE_FREE 1
++#define ISCSI_STATE_LOGGED_IN 2
++#define ISCSI_STATE_FAILED 3
++#define ISCSI_STATE_TERMINATE 4
++#define ISCSI_STATE_IN_RECOVERY 5
++#define ISCSI_STATE_RECOVERY_FAILED 6
+
+ struct iscsi_cls_session {
+ struct list_head sess_list; /* item in session_list */
++ struct list_head host_list;
+ struct iscsi_transport *transport;
+- spinlock_t lock;
+- struct work_struct block_work;
+- struct work_struct unblock_work;
+- struct work_struct scan_work;
+- struct work_struct unbind_work;
+
+ /* recovery fields */
+ int recovery_tmo;
+ struct delayed_work recovery_work;
+
+- unsigned int target_id;
++ int target_id;
+
+- int state;
+ int sid; /* session id */
+ void *dd_data; /* LLD private data */
+ struct device dev; /* sysfs transport/container device */
+@@ -194,53 +194,31 @@ struct iscsi_cls_session {
+ #define starget_to_session(_stgt) \
+ iscsi_dev_to_session(_stgt->dev.parent)
+
+-struct iscsi_cls_host {
+- atomic_t nr_scans;
++struct iscsi_host {
++ struct list_head sessions;
+ struct mutex mutex;
+- struct workqueue_struct *scan_workq;
+- char scan_workq_name[20];
+-};
+-
+-extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
+- void (*fn)(struct iscsi_cls_session *));
+-
+-struct iscsi_endpoint {
+- void *dd_data; /* LLD private data */
+- struct device dev;
+- unsigned int id;
+ };
+
+ /*
+ * session and connection functions that can be used by HW iSCSI LLDs
+ */
+-#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
+- dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
+-
+-#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
+- dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
+-
+-extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
+- struct iscsi_transport *transport, int dd_size);
++ struct iscsi_transport *transport);
+ extern int iscsi_add_session(struct iscsi_cls_session *session,
+ unsigned int target_id);
+-extern int iscsi_session_event(struct iscsi_cls_session *session,
+- enum iscsi_uevent_e event);
++extern int iscsi_if_create_session_done(struct iscsi_cls_conn *conn);
++extern int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn);
+ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost,
+ struct iscsi_transport *t,
+- int dd_size,
+ unsigned int target_id);
+ extern void iscsi_remove_session(struct iscsi_cls_session *session);
+ extern void iscsi_free_session(struct iscsi_cls_session *session);
+ extern int iscsi_destroy_session(struct iscsi_cls_session *session);
+ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
+- int dd_size, uint32_t cid);
++ uint32_t cid);
+ extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
+ extern void iscsi_unblock_session(struct iscsi_cls_session *session);
+ extern void iscsi_block_session(struct iscsi_cls_session *session);
+-extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
+-extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
+-extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
+-extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
++
+
+ #endif
+--
+1.5.3.8
+
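
The hunks above roll include/scsi/libiscsi.h and scsi_transport_iscsi.h back
to the pre-2.6.26 model: separate iscsi_cmd_task/iscsi_mgmt_task pools,
session-state #defines, and endpoint callbacks that pass a raw 64-bit handle
instead of a struct iscsi_endpoint. A minimal sketch of the handle convention
an LLD follows with this interface (the iSER backport further down does the
same thing); all example_* names are hypothetical:

    #include <linux/slab.h>
    #include <linux/socket.h>
    #include <linux/types.h>

    /* hypothetical connection object and connect helper, for illustration */
    struct example_conn {
            int state;
    };
    static int example_conn_start(struct example_conn *c,
                                  struct sockaddr *dst, int non_blocking);

    static int example_ep_connect(struct sockaddr *dst_addr, int non_blocking,
                                  uint64_t *ep_handle)
    {
            struct example_conn *c;

            c = kzalloc(sizeof(*c), GFP_KERNEL);
            if (!c)
                    return -ENOMEM;

            /* the connection pointer itself becomes the opaque handle */
            *ep_handle = (uint64_t)(unsigned long)c;
            return example_conn_start(c, dst_addr, non_blocking);
    }
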
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_02_add_to_2_6_9.patch b/kernel_patches/backport/2.6.9_U7/iscsi_02_add_to_2_6_9.patch
new file mode 100644
index 0000000..1f05d95
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_02_add_to_2_6_9.patch
@@ -0,0 +1,180 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 4376840..11dfaf9 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2145,7 +2145,6 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
+ static struct scsi_host_template iscsi_sht = {
+ .name = "iSCSI Initiator over TCP/IP",
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_SG_TABLESIZE,
+ .max_sectors = 0xFFFF,
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index d37048c..60f5846 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1366,7 +1366,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ shost->max_lun = iscsit->max_lun;
+ shost->max_cmd_len = iscsit->max_cmd_len;
+ shost->transportt = scsit;
+- shost->transportt->create_work_queue = 1;
+ *hostno = shost->host_no;
+
+ session = iscsi_hostdata(shost->hostdata);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 8133c22..f1c68f7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -65,6 +65,8 @@ static DEFINE_SPINLOCK(iscsi_transport_lock);
+ #define cdev_to_iscsi_internal(_cdev) \
+ container_of(_cdev, struct iscsi_internal, cdev)
+
++extern int attribute_container_init(void);
++
+ static void iscsi_transport_release(struct class_device *cdev)
+ {
+ struct iscsi_internal *priv = cdev_to_iscsi_internal(cdev);
+@@ -80,6 +82,17 @@ static struct class iscsi_transport_class = {
+ .release = iscsi_transport_release,
+ };
+
++static void iscsi_host_class_release(struct class_device *class_dev)
++{
++ struct Scsi_Host *shost = transport_class_to_shost(class_dev);
++ put_device(&shost->shost_gendev);
++}
++
++struct class iscsi_host_class = {
++ .name = "iscsi_host",
++ .release = iscsi_host_class_release,
++};
++
+ static ssize_t
+ show_transport_handle(struct class_device *cdev, char *buf)
+ {
+@@ -115,10 +128,8 @@ static struct attribute_group iscsi_transport_group = {
+ .attrs = iscsi_transport_attrs,
+ };
+
+-static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+- struct class_device *cdev)
++static int iscsi_setup_host(struct Scsi_Host *shost)
+ {
+- struct Scsi_Host *shost = dev_to_shost(dev);
+ struct iscsi_host *ihost = shost->shost_data;
+
+ memset(ihost, 0, sizeof(*ihost));
+@@ -127,12 +138,6 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
+ return 0;
+ }
+
+-static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
+- "iscsi_host",
+- iscsi_setup_host,
+- NULL,
+- NULL);
+-
+ static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
+ "iscsi_session",
+ NULL,
+@@ -216,24 +221,6 @@ static int iscsi_is_session_dev(const struct device *dev)
+ return dev->release == iscsi_session_release;
+ }
+
+-static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
+- uint id, uint lun)
+-{
+- struct iscsi_host *ihost = shost->shost_data;
+- struct iscsi_cls_session *session;
+-
+- mutex_lock(&ihost->mutex);
+- list_for_each_entry(session, &ihost->sessions, host_list) {
+- if ((channel == SCAN_WILD_CARD || channel == 0) &&
+- (id == SCAN_WILD_CARD || id == session->target_id))
+- scsi_scan_target(&session->dev, 0,
+- session->target_id, lun, 1);
+- }
+- mutex_unlock(&ihost->mutex);
+-
+- return 0;
+-}
+-
+ static void session_recovery_timedout(struct work_struct *work)
+ {
+ struct iscsi_cls_session *session =
+@@ -362,8 +349,6 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
+ list_del(&session->host_list);
+ mutex_unlock(&ihost->mutex);
+
+- scsi_remove_target(&session->dev);
+-
+ transport_unregister_device(&session->dev);
+ device_del(&session->dev);
+ }
+@@ -1269,24 +1254,6 @@ static int iscsi_conn_match(struct attribute_container *cont,
+ return &priv->conn_cont.ac == cont;
+ }
+
+-static int iscsi_host_match(struct attribute_container *cont,
+- struct device *dev)
+-{
+- struct Scsi_Host *shost;
+- struct iscsi_internal *priv;
+-
+- if (!scsi_is_host_device(dev))
+- return 0;
+-
+- shost = dev_to_shost(dev);
+- if (!shost->transportt ||
+- shost->transportt->host_attrs.ac.class != &iscsi_host_class.class)
+- return 0;
+-
+- priv = to_iscsi_internal(shost->transportt);
+- return &priv->t.host_attrs.ac == cont;
+-}
+-
+ struct scsi_transport_template *
+ iscsi_register_transport(struct iscsi_transport *tt)
+ {
+@@ -1306,7 +1273,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ INIT_LIST_HEAD(&priv->list);
+ priv->daemon_pid = -1;
+ priv->iscsi_transport = tt;
+- priv->t.user_scan = iscsi_user_scan;
+
+ priv->cdev.class = &iscsi_transport_class;
+ snprintf(priv->cdev.class_id, BUS_ID_SIZE, "%s", tt->name);
+@@ -1319,12 +1285,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
+ goto unregister_cdev;
+
+ /* host parameters */
+- priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
+- priv->t.host_attrs.ac.class = &iscsi_host_class.class;
+- priv->t.host_attrs.ac.match = iscsi_host_match;
++ priv->t.host_attrs = &priv->host_attrs[0];
++ priv->t.host_class = &iscsi_host_class;
++ priv->t.host_setup = iscsi_setup_host;
+ priv->t.host_size = sizeof(struct iscsi_host);
+- priv->host_attrs[0] = NULL;
+- transport_container_register(&priv->t.host_attrs);
+
+ /* connection parameters */
+ priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
+@@ -1402,7 +1366,6 @@ int iscsi_unregister_transport(struct iscsi_transport *tt)
+
+ transport_container_unregister(&priv->conn_cont);
+ transport_container_unregister(&priv->session_cont);
+- transport_container_unregister(&priv->t.host_attrs);
+
+ sysfs_remove_group(&priv->cdev.kobj, &iscsi_transport_group);
+ class_device_unregister(&priv->cdev);
+@@ -1420,6 +1420,7 @@ static __init int iscsi_transport_init(void)
+ ISCSI_TRANSPORT_VERSION);
+
+ atomic_set(&iscsi_session_nr, 0);
++ attribute_container_init();
+
+ err = class_register(&iscsi_transport_class);
+ if (err)
+ return err;
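
This patch moves the transport-class plumbing onto the 2.6.9 driver model: an
explicit struct class for iscsi_host, the host_attrs/host_class/host_setup/
host_size template fields instead of a transport container, and an explicit
call to attribute_container_init(), which is needed because the container code
is compiled into the module itself on this kernel (see the scsi_addons patch
further down). A minimal sketch of the resulting init ordering, assuming the
names from the patched scsi_transport_iscsi.c:

    /* sketch only; iscsi_transport_class and attribute_container_init()
     * are the ones referenced in the patch above */
    static int __init example_transport_init(void)
    {
            int err;

            /* normally run by the driver core at boot; required here
             * because attribute_container.c is built into this module */
            attribute_container_init();

            err = class_register(&iscsi_transport_class);
            if (err)
                    return err;

            /* remaining class/container registrations as in the patch */
            return 0;
    }
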
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_03_add_session_wq.patch b/kernel_patches/backport/2.6.9_U7/iscsi_03_add_session_wq.patch
new file mode 100644
index 0000000..5a77c07
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_03_add_session_wq.patch
@@ -0,0 +1,76 @@
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index a6f2303..5d62cc0 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -612,7 +612,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (resume_tx) {
+ iser_dbg("%ld resuming tx\n",jiffies);
+- scsi_queue_work(conn->session->host, &conn->xmitwork);
++ queue_work(conn->session->wq, &conn->xmitwork);
+ }
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index e8020a5..43e9128 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -828,7 +828,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
+ session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
+ spin_unlock(&session->lock);
+
+- scsi_queue_work(host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+
+ reject:
+@@ -928,7 +928,7 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ else
+ __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
+
+- scsi_queue_work(session->host, &conn->xmitwork);
++ queue_work(session->wq, &conn->xmitwork);
+ return 0;
+ }
+
+@@ -1415,6 +1415,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
+ INIT_LIST_HEAD(&mtask->running);
+ }
+
++ session->wq = create_singlethread_workqueue("");
++ BUG_ON(!session->wq);
++
+ if (scsi_add_host(shost, NULL))
+ goto add_host_fail;
+
+@@ -1462,6 +1465,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
+
+ kfree(session->targetname);
+
++ destroy_workqueue(session->wq);
++
+ iscsi_destroy_session(cls_session);
+ scsi_host_put(shost);
+ module_put(owner);
+@@ -1595,7 +1600,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ }
+
+ /* flush queued up work because we free the connection below */
+- scsi_flush_work(session->host);
++ flush_workqueue(session->wq);
+
+ spin_lock_bh(&session->lock);
+ kfree(conn->data);
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..e8a95f5 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -244,6 +244,8 @@ struct iscsi_session {
+ int mgmtpool_max; /* size of mgmt array */
+ struct iscsi_mgmt_task **mgmt_cmds; /* Original mgmt arr */
+ struct iscsi_queue mgmtpool; /* Mgmt PDU's pool */
++
++ struct workqueue_struct *wq;
+ };
+
+ /*
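
On 2.6.9 there is no per-host work queue behind scsi_queue_work() and
scsi_flush_work(), so this patch gives each session a private single-threaded
workqueue and points the xmit path at it. A minimal sketch of that lifecycle,
assuming the session->wq member added above; the queue name is arbitrary (the
patch passes an empty string), and the error return is a softer alternative to
the patch's BUG_ON():

    #include <linux/workqueue.h>
    #include <scsi/libiscsi.h>

    static int example_session_wq_alloc(struct iscsi_session *session)
    {
            session->wq = create_singlethread_workqueue("iscsi_q");
            if (!session->wq)
                    return -ENOMEM;
            return 0;
    }

    static void example_session_wq_free(struct iscsi_session *session)
    {
            flush_workqueue(session->wq);    /* replaces scsi_flush_work() */
            destroy_workqueue(session->wq);  /* torn down with the session */
    }
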
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_04_inet_sock_to_opt.patch b/kernel_patches/backport/2.6.9_U7/iscsi_04_inet_sock_to_opt.patch
new file mode 100644
index 0000000..1fb2376
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_04_inet_sock_to_opt.patch
@@ -0,0 +1,13 @@
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 905efc4..f73a743 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -2027,7 +2027,7 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+- struct inet_sock *inet;
++ struct inet_opt *inet;
+ struct ipv6_pinfo *np;
+ struct sock *sk;
+ int len;
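
This one-liner tracks a pure rename: on 2.6.9 the per-socket IPv4 state is
still called struct inet_opt and only became struct inet_sock in later
mainline kernels. A compat-header alternative to editing the source, sketched
here with an approximate version cutoff that should be checked against the
target kernel:

    #include <linux/version.h>

    /* map the newer struct tag onto the 2.6.9 name; cutoff is approximate */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
    #define inet_sock inet_opt
    #endif
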
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_05_release_host_lock_before_eh.patch b/kernel_patches/backport/2.6.9_U7/iscsi_05_release_host_lock_before_eh.patch
new file mode 100644
index 0000000..c994506
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_05_release_host_lock_before_eh.patch
@@ -0,0 +1,60 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 7db081b..211944e 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -968,12 +968,14 @@ int iscsi_eh_host_reset(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn = session->leadconn;
+ int fail_session = 0;
+
++ spin_unlock_irq(host->host_lock);
+ spin_lock_bh(&session->lock);
+ if (session->state == ISCSI_STATE_TERMINATE) {
+ failed:
+ debug_scsi("failing host reset: session terminated "
+ "[CID %d age %d]\n", conn->id, session->age);
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+ return FAILED;
+ }
+
+@@ -1005,6 +1007,7 @@ failed:
+ else
+ goto failed;
+ spin_unlock_bh(&session->lock);
++ spin_lock_irq(host->host_lock);
+
+ return SUCCESS;
+ }
+@@ -1162,13 +1165,16 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
+ struct iscsi_conn *conn;
+ struct iscsi_session *session;
+ int rc;
++ struct Scsi_Host *shost = sc->device->host;
+
++ spin_unlock_irq(shost->host_lock);
+ /*
+ * if session was ISCSI_STATE_IN_RECOVERY then we may not have
+ * got the command.
+ */
+ if (!sc->SCp.ptr) {
+ debug_scsi("sc never reached iscsi layer or it completed.\n");
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+ }
+
+@@ -1253,6 +1259,7 @@ success_cleanup:
+
+ success_rel_mutex:
+ mutex_unlock(&conn->xmitmutex);
++ spin_lock_irq(shost->host_lock);
+ return SUCCESS;
+
+ failed:
+@@ -1260,6 +1267,7 @@ failed:
+ mutex_unlock(&conn->xmitmutex);
+
+ debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
++ spin_lock_irq(shost->host_lock);
+ return FAILED;
+ }
+ EXPORT_SYMBOL_GPL(iscsi_eh_abort);
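
The 2.6.9 midlayer enters a driver's error-handler callbacks with
shost->host_lock held and interrupts disabled, while the upstream libiscsi
handlers take session->lock and sleep on mutexes, so the patch drops the lock
on entry and retakes it before every return. The same idea written as a
wrapper, purely as an illustration (unlocked_eh_abort() stands in for an
unmodified handler and is hypothetical):

    #include <linux/spinlock.h>
    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_host.h>

    static int unlocked_eh_abort(struct scsi_cmnd *sc);    /* hypothetical */

    static int example_eh_abort(struct scsi_cmnd *sc)
    {
            struct Scsi_Host *shost = sc->device->host;
            int ret;

            spin_unlock_irq(shost->host_lock);
            ret = unlocked_eh_abort(sc);    /* may sleep, takes other locks */
            spin_lock_irq(shost->host_lock);

            return ret;
    }
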
diff --git a/kernel_patches/backport/2.6.9_U7/iscsi_06_scsi_addons.patch b/kernel_patches/backport/2.6.9_U7/iscsi_06_scsi_addons.patch
new file mode 100644
index 0000000..a114696
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iscsi_06_scsi_addons.patch
@@ -0,0 +1,75 @@
+diff --git a/drivers/scsi/init.c b/drivers/scsi/init.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/init.c
+@@ -0,0 +1 @@
++#include "src/init.c"
+diff --git a/drivers/scsi/attribute_container.c b/drivers/scsi/attribute_container.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/attribute_container.c
+@@ -0,0 +1 @@
++#include "../drivers/base/attribute_container.c"
+diff --git a/drivers/scsi/transport_class.c b/drivers/scsi/transport_class.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/transport_class.c
+@@ -0,0 +1 @@
++#include "../drivers/base/transport_class.c"
+diff --git a/drivers/scsi/klist.c b/drivers/scsi/klist.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/klist.c
+@@ -0,0 +1 @@
++#include "../../lib/klist.c"
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi.c
+@@ -0,0 +1 @@
++#include "src/scsi.c"
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_lib.c
+@@ -0,0 +1 @@
++#include "src/scsi_lib.c"
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_scan.c
+@@ -0,0 +1 @@
++#include "src/scsi_scan.c"
+diff --git a/drivers/scsi/libiscsi_f.c b/drivers/scsi/libiscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/libiscsi_f.c
+@@ -0,0 +1 @@
++#include "libiscsi.c"
+diff --git a/drivers/scsi/scsi_transport_iscsi_f.c b/drivers/scsi/scsi_transport_iscsi_f.c
+new file mode 100644
+index 0000000..58cf933
+--- /dev/null
++++ b/drivers/scsi/scsi_transport_iscsi_f.c
+@@ -0,0 +1 @@
++#include "scsi_transport_iscsi.c"
+diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
+index e212608..3bf2015 100644
+--- a/drivers/scsi/Makefile
++++ b/drivers/scsi/Makefile
+@@ -3,2 +3,7 @@
+ obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o
+ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
++
++CFLAGS_attribute_container.o = $(BACKPORT_INCLUDES)/src/
++
++scsi_transport_iscsi-y := scsi_transport_iscsi_f.o scsi.o scsi_lib.o init.o klist.o attribute_container.o transport_class.o
++libiscsi-y := libiscsi_f.o scsi_scan.o
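
Rather than adapting the iSCSI code to the 2.6.9 SCSI core, this patch
compiles backported copies of the needed core sources (scsi.c, scsi_lib.c,
scsi_scan.c, attribute_container.c, transport_class.c, klist.c, ...) into the
two modules through one-line wrapper files, and the Makefile hunk folds them
into scsi_transport_iscsi.o and libiscsi.o. The per-object CFLAGS line
presumably adds the backport's private headers to the include path (assuming
BACKPORT_INCLUDES expands to an -I option). The wrapper trick itself is only
this, shown with a hypothetical path:

    /* wrapper translation unit: the backported source is compiled inside
     * this module, against the backport's headers rather than the
     * installed 2.6.9 kernel's, and linked in via the *-y lists above */
    #include "src/scsi_scan.c"
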
diff --git a/kernel_patches/backport/2.6.9_U7/iser_00_sync_kernel_code_with_2.6.26.patch b/kernel_patches/backport/2.6.9_U7/iser_00_sync_kernel_code_with_2.6.26.patch
new file mode 100644
index 0000000..ff5d719
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_00_sync_kernel_code_with_2.6.26.patch
@@ -0,0 +1,1549 @@
+From 12b757e92112750b4bc90cf8150d20484d684dcf Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 21 Aug 2008 14:28:56 +0300
+Subject: [PATCH] iser_sync_kernel_code_with_2.6.26
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 359 ++++++++++++--------------
+ drivers/infiniband/ulp/iser/iscsi_iser.h | 46 ++--
+ drivers/infiniband/ulp/iser/iser_initiator.c | 211 ++++++++--------
+ drivers/infiniband/ulp/iser/iser_memory.c | 79 +++---
+ drivers/infiniband/ulp/iser/iser_verbs.c | 31 +--
+ 5 files changed, 345 insertions(+), 381 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5a1cf25..aeb58ca 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -42,6 +42,9 @@
+ * Zhenyu Wang
+ * Modified by:
+ * Erez Zilber
++ *
++ *
++ * $Id: iscsi_iser.c 6965 2006-05-07 11:36:20Z ogerlitz $
+ */
+
+ #include <linux/types.h>
+@@ -71,10 +74,6 @@
+
+ #include "iscsi_iser.h"
+
+-static struct scsi_host_template iscsi_iser_sht;
+-static struct iscsi_transport iscsi_iser_transport;
+-static struct scsi_transport_template *iscsi_iser_scsi_transport;
+-
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
+
+@@ -95,6 +94,7 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr, char *rx_data, int rx_data_len)
+ {
+ int rc = 0;
++ uint32_t ret_itt;
+ int datalen;
+ int ahslen;
+
+@@ -110,7 +110,12 @@ iscsi_iser_recv(struct iscsi_conn *conn,
+ /* read AHS */
+ ahslen = hdr->hlength * 4;
+
+- rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++ /* verify itt (itt encoding: age+cid+itt) */
++ rc = iscsi_verify_itt(conn, hdr, &ret_itt);
++
++ if (!rc)
++ rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
++
+ if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
+ goto error;
+
+@@ -121,33 +126,25 @@ error:
+
+
+ /**
+- * iscsi_iser_task_init - Initialize task
+- * @task: iscsi task
++ * iscsi_iser_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
+ *
+- * Initialize the task for the scsi command or mgmt command.
+- */
++ **/
+ static int
+-iscsi_iser_task_init(struct iscsi_task *task)
++iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt task */
+- if (!task->sc) {
+- iser_task->desc.data = task->data;
+- return 0;
+- }
+-
+- iser_task->command_sent = 0;
+- iser_task->iser_conn = iser_conn;
+- iser_task_rdma_init(iser_task);
++ iser_ctask->command_sent = 0;
++ iser_ctask->iser_conn = iser_conn;
++ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+
+ /**
+- * iscsi_iser_mtask_xmit - xmit management(immediate) task
++ * iscsi_mtask_xmit - xmit management(immediate) task
+ * @conn: iscsi connection
+- * @task: task management task
++ * @mtask: task management task
+ *
+ * Notes:
+ * The function can return -EAGAIN in which case caller must
+@@ -156,19 +153,20 @@ iscsi_iser_task_init(struct iscsi_task *task)
+ *
+ **/
+ static int
+-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask)
+ {
+ int error = 0;
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
++ debug_scsi("mtask deq [cid %d itt 0x%x]\n", conn->id, mtask->itt);
+
+- error = iser_send_control(conn, task);
++ error = iser_send_control(conn, mtask);
+
+- /* since iser xmits control with zero copy, tasks can not be recycled
++ /* since iser xmits control with zero copy, mtasks can not be recycled
+ * right after sending them.
+ * The recycling scheme is based on whether a response is expected
+- * - if yes, the task is recycled at iscsi_complete_pdu
+- * - if no, the task is recycled at iser_snd_completion
++ * - if yes, the mtask is recycled at iscsi_complete_pdu
++ * - if no, the mtask is recycled at iser_snd_completion
+ */
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+@@ -177,86 +175,97 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+ }
+
+ static int
+-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++iscsi_iser_ctask_xmit_unsol_data(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_data hdr;
+ int error = 0;
+
+ /* Send data-out PDUs while there's still unsolicited data to send */
+- while (task->unsol_count > 0) {
+- iscsi_prep_unsolicit_data_pdu(task, &hdr);
++ while (ctask->unsol_count > 0) {
++ iscsi_prep_unsolicit_data_pdu(ctask, &hdr);
+ debug_scsi("Sending data-out: itt 0x%x, data count %d\n",
+- hdr.itt, task->data_count);
++ hdr.itt, ctask->data_count);
+
+ /* the buffer description has been passed with the command */
+ /* Send the command */
+- error = iser_send_data_out(conn, task, &hdr);
++ error = iser_send_data_out(conn, ctask, &hdr);
+ if (error) {
+- task->unsol_datasn--;
+- goto iscsi_iser_task_xmit_unsol_data_exit;
++ ctask->unsol_datasn--;
++ goto iscsi_iser_ctask_xmit_unsol_data_exit;
+ }
+- task->unsol_count -= task->data_count;
++ ctask->unsol_count -= ctask->data_count;
+ debug_scsi("Need to send %d more as data-out PDUs\n",
+- task->unsol_count);
++ ctask->unsol_count);
+ }
+
+-iscsi_iser_task_xmit_unsol_data_exit:
++iscsi_iser_ctask_xmit_unsol_data_exit:
+ return error;
+ }
+
+ static int
+-iscsi_iser_task_xmit(struct iscsi_task *task)
++iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_conn *conn = task->conn;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (!task->sc)
+- return iscsi_iser_mtask_xmit(conn, task);
+-
+- if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(task->sc) == 0);
++ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(scsi_bufflen(ctask->sc) == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- task->itt, scsi_bufflen(task->sc),
+- task->imm_count, task->unsol_count);
++ ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->imm_count, ctask->unsol_count);
+ }
+
+- debug_scsi("task deq [cid %d itt 0x%x]\n",
+- conn->id, task->itt);
++ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
++ conn->id, ctask->itt);
+
+ /* Send the cmd PDU */
+- if (!iser_task->command_sent) {
+- error = iser_send_command(conn, task);
++ if (!iser_ctask->command_sent) {
++ error = iser_send_command(conn, ctask);
+ if (error)
+- goto iscsi_iser_task_xmit_exit;
+- iser_task->command_sent = 1;
++ goto iscsi_iser_ctask_xmit_exit;
++ iser_ctask->command_sent = 1;
+ }
+
+ /* Send unsolicited data-out PDU(s) if necessary */
+- if (task->unsol_count)
+- error = iscsi_iser_task_xmit_unsol_data(conn, task);
++ if (ctask->unsol_count)
++ error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
+
+- iscsi_iser_task_xmit_exit:
++ iscsi_iser_ctask_xmit_exit:
+ if (error && error != -ENOBUFS)
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ return error;
+ }
+
+ static void
+-iscsi_iser_cleanup_task(struct iscsi_conn *conn, struct iscsi_task *task)
++iscsi_iser_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+
+- /* mgmt tasks do not need special cleanup */
+- if (!task->sc)
+- return;
++ if (iser_ctask->status == ISER_TASK_STATUS_STARTED) {
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
++ }
++}
++
++static struct iser_conn *
++iscsi_iser_ib_conn_lookup(__u64 ep_handle)
++{
++ struct iser_conn *ib_conn;
++ struct iser_conn *uib_conn = (struct iser_conn *)(unsigned long)ep_handle;
+
+- if (iser_task->status == ISER_TASK_STATUS_STARTED) {
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
++ mutex_lock(&ig.connlist_mutex);
++ list_for_each_entry(ib_conn, &ig.connlist, conn_list) {
++ if (ib_conn == uib_conn) {
++ mutex_unlock(&ig.connlist_mutex);
++ return ib_conn;
++ }
+ }
++ mutex_unlock(&ig.connlist_mutex);
++ iser_err("no conn exists for eph %llx\n",(unsigned long long)ep_handle);
++ return NULL;
+ }
+
+ static struct iscsi_cls_conn *
+@@ -266,7 +275,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_iser_conn *iser_conn;
+
+- cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx);
++ cls_conn = iscsi_conn_setup(cls_session, conn_idx);
+ if (!cls_conn)
+ return NULL;
+ conn = cls_conn->dd_data;
+@@ -277,11 +286,21 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
+ */
+ conn->max_recv_dlength = 128;
+
+- iser_conn = conn->dd_data;
++ iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
++ if (!iser_conn)
++ goto conn_alloc_fail;
++
++ /* currently this is the only field which need to be initiated */
++ rwlock_init(&iser_conn->lock);
++
+ conn->dd_data = iser_conn;
+ iser_conn->iscsi_conn = conn;
+
+ return cls_conn;
++
++conn_alloc_fail:
++ iscsi_conn_teardown(cls_conn);
++ return NULL;
+ }
+
+ static void
+@@ -289,18 +308,11 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn)
+ {
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
+
+ iscsi_conn_teardown(cls_conn);
+- /*
+- * Userspace will normally call the stop callback and
+- * already have freed the ib_conn, but if it goofed up then
+- * we free it here.
+- */
+- if (ib_conn) {
+- ib_conn->iser_conn = NULL;
+- iser_conn_put(ib_conn);
+- }
++ if (iser_conn->ib_conn)
++ iser_conn->ib_conn->iser_conn = NULL;
++ kfree(iser_conn);
+ }
+
+ static int
+@@ -311,7 +323,6 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_conn *conn = cls_conn->dd_data;
+ struct iscsi_iser_conn *iser_conn;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+ int error;
+
+ error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
+@@ -320,14 +331,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+
+ /* the transport ep handle comes from user space so it must be
+ * verified against the global ib connections list */
+- ep = iscsi_lookup_endpoint(transport_eph);
+- if (!ep) {
++ ib_conn = iscsi_iser_ib_conn_lookup(transport_eph);
++ if (!ib_conn) {
+ iser_err("can't bind eph %llx\n",
+ (unsigned long long)transport_eph);
+ return -EINVAL;
+ }
+- ib_conn = ep->dd_data;
+-
+ /* binds the iSER connection retrieved from the previously
+ * connected ep_handle to the iSCSI layer connection. exchanges
+ * connection pointers */
+@@ -335,30 +344,10 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ iser_conn = conn->dd_data;
+ ib_conn->iser_conn = iser_conn;
+ iser_conn->ib_conn = ib_conn;
+- iser_conn_get(ib_conn);
+- return 0;
+-}
+
+-static void
+-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+-{
+- struct iscsi_conn *conn = cls_conn->dd_data;
+- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iser_conn *ib_conn = iser_conn->ib_conn;
++ conn->recv_lock = &iser_conn->lock;
+
+- /*
+- * Userspace may have goofed up and not bound the connection or
+- * might have only partially setup the connection.
+- */
+- if (ib_conn) {
+- iscsi_conn_stop(cls_conn, flag);
+- /*
+- * There is no unbind event so the stop callback
+- * must release the ref from the bind.
+- */
+- iser_conn_put(ib_conn);
+- }
+- iser_conn->ib_conn = NULL;
++ return 0;
+ }
+
+ static int
+@@ -374,75 +363,55 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+ return iscsi_conn_start(cls_conn);
+ }
+
+-static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+-{
+- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+-
+- iscsi_host_remove(shost);
+- iscsi_host_free(shost);
+-}
++static struct iscsi_transport iscsi_iser_transport;
+
+ static struct iscsi_cls_session *
+-iscsi_iser_session_create(struct iscsi_endpoint *ep,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++iscsi_iser_session_create(struct iscsi_transport *iscsit,
++ struct scsi_transport_template *scsit,
++ uint16_t cmds_max, uint16_t qdepth,
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+- struct Scsi_Host *shost;
+ int i;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
+- struct iser_conn *ib_conn;
+-
+- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, ISCSI_MAX_CMD_PER_LUN);
+- if (!shost)
+- return NULL;
+- shost->transportt = iscsi_iser_scsi_transport;
+- shost->max_lun = iscsi_max_lun;
+- shost->max_id = 0;
+- shost->max_channel = 0;
+- shost->max_cmd_len = 16;
+-
+- /*
+- * older userspace tools (before 2.0-870) did not pass us
+- * the leading conn's ep so this will be NULL;
+- */
+- if (ep)
+- ib_conn = ep->dd_data;
+-
+- if (iscsi_host_add(shost,
+- ep ? ib_conn->device->ib_device->dma_device : NULL))
+- goto free_host;
+- *hostno = shost->host_no;
++ uint32_t hn;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_mgmt_task *mtask;
++ struct iscsi_iser_cmd_task *iser_ctask;
++ struct iser_desc *desc;
+
+ /*
+ * we do not support setting can_queue cmd_per_lun from userspace yet
+ * because we preallocate so many resources
+ */
+- cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
++ cls_session = iscsi_session_setup(iscsit, scsit,
+ ISCSI_DEF_XMIT_CMDS_MAX,
+- sizeof(struct iscsi_iser_task),
+- initial_cmdsn, 0);
++ ISCSI_MAX_CMD_PER_LUN,
++ sizeof(struct iscsi_iser_cmd_task),
++ sizeof(struct iser_desc),
++ initial_cmdsn, &hn);
+ if (!cls_session)
+- goto remove_host;
+- session = cls_session->dd_data;
++ return NULL;
++
++ *hostno = hn;
++ session = class_to_transport_session(cls_session);
+
+- shost->can_queue = session->scsi_cmds_max;
+ /* libiscsi setup itts, data and pool so just set desc fields */
+ for (i = 0; i < session->cmds_max; i++) {
+- task = session->cmds[i];
+- iser_task = task->dd_data;
+- task->hdr = (struct iscsi_cmd *)&iser_task->desc.iscsi_header;
+- task->hdr_max = sizeof(iser_task->desc.iscsi_header);
++ ctask = session->cmds[i];
++ iser_ctask = ctask->dd_data;
++ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
++ ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+- return cls_session;
+
+-remove_host:
+- iscsi_host_remove(shost);
+-free_host:
+- iscsi_host_free(shost);
+- return NULL;
++ for (i = 0; i < session->mgmtpool_max; i++) {
++ mtask = session->mgmt_cmds[i];
++ desc = mtask->dd_data;
++ mtask->hdr = &desc->iscsi_header;
++ desc->data = mtask->data;
++ }
++
++ return cls_session;
+ }
+
+ static int
+@@ -515,37 +484,34 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
+ stats->custom[3].value = conn->fmr_unalign_cnt;
+ }
+
+-static struct iscsi_endpoint *
+-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
++static int
++iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking,
++ __u64 *ep_handle)
+ {
+ int err;
+ struct iser_conn *ib_conn;
+- struct iscsi_endpoint *ep;
+
+- ep = iscsi_create_endpoint(sizeof(*ib_conn));
+- if (!ep)
+- return ERR_PTR(-ENOMEM);
++ err = iser_conn_init(&ib_conn);
++ if (err)
++ goto out;
+
+- ib_conn = ep->dd_data;
+- ib_conn->ep = ep;
+- iser_conn_init(ib_conn);
++ err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr, non_blocking);
++ if (!err)
++ *ep_handle = (__u64)(unsigned long)ib_conn;
+
+- err = iser_connect(ib_conn, NULL, (struct sockaddr_in *)dst_addr,
+- non_blocking);
+- if (err) {
+- iscsi_destroy_endpoint(ep);
+- return ERR_PTR(err);
+- }
+- return ep;
++out:
++ return err;
+ }
+
+ static int
+-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
++iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
+ {
+- struct iser_conn *ib_conn;
++ struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ int rc;
+
+- ib_conn = ep->dd_data;
++ if (!ib_conn)
++ return -EINVAL;
++
+ rc = wait_event_interruptible_timeout(ib_conn->wait,
+ ib_conn->state == ISER_CONN_UP,
+ msecs_to_jiffies(timeout_ms));
+@@ -567,21 +533,13 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+ }
+
+ static void
+-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
++iscsi_iser_ep_disconnect(__u64 ep_handle)
+ {
+ struct iser_conn *ib_conn;
+
+- ib_conn = ep->dd_data;
+- if (ib_conn->iser_conn)
+- /*
+- * Must suspend xmit path if the ep is bound to the
+- * iscsi_conn, so we know we are not accessing the ib_conn
+- * when we free it.
+- *
+- * This may not be bound if the ep poll failed.
+- */
+- iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);
+-
++ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
++ if (!ib_conn)
++ return;
+
+ iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+ iser_conn_terminate(ib_conn);
+@@ -592,6 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
++ .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+@@ -625,14 +584,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_USERNAME | ISCSI_PASSWORD |
+ ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO |
+- ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
++ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
++ .host_template = &iscsi_iser_sht,
++ .conndata_size = sizeof(struct iscsi_conn),
++ .max_lun = ISCSI_ISER_MAX_LUN,
++ .max_cmd_len = ISCSI_ISER_MAX_CMD_LEN,
+ /* session management */
+ .create_session = iscsi_iser_session_create,
+- .destroy_session = iscsi_iser_session_destroy,
++ .destroy_session = iscsi_session_teardown,
+ /* connection management */
+ .create_conn = iscsi_iser_conn_create,
+ .bind_conn = iscsi_iser_conn_bind,
+@@ -641,16 +603,17 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_conn_param = iscsi_conn_get_param,
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+- .stop_conn = iscsi_iser_conn_stop,
++ .stop_conn = iscsi_conn_stop,
+ /* iscsi host params */
+ .get_host_param = iscsi_host_get_param,
+ .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
+- .init_task = iscsi_iser_task_init,
+- .xmit_task = iscsi_iser_task_xmit,
+- .cleanup_task = iscsi_iser_cleanup_task,
++ .init_cmd_task = iscsi_iser_cmd_init,
++ .xmit_cmd_task = iscsi_iser_ctask_xmit,
++ .xmit_mgmt_task = iscsi_iser_mtask_xmit,
++ .cleanup_cmd_task = iscsi_iser_cleanup_ctask,
+ /* recovery */
+ .session_recovery_timedout = iscsi_session_recovery_timedout,
+
+@@ -670,6 +633,8 @@ static int __init iser_init(void)
+ return -EINVAL;
+ }
+
++ iscsi_iser_transport.max_lun = iscsi_max_lun;
++
+ memset(&ig, 0, sizeof(struct iser_global));
+
+ ig.desc_cache = kmem_cache_create("iser_descriptors",
+@@ -685,9 +650,7 @@ static int __init iser_init(void)
+ mutex_init(&ig.connlist_mutex);
+ INIT_LIST_HEAD(&ig.connlist);
+
+- iscsi_iser_scsi_transport = iscsi_register_transport(
+- &iscsi_iser_transport);
+- if (!iscsi_iser_scsi_transport) {
++ if (!iscsi_register_transport(&iscsi_iser_transport)) {
+ iser_err("iscsi_register_transport failed\n");
+ err = -EINVAL;
+ goto register_transport_failure;
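
The lookup above is the receiving half of the handle convention sketched
earlier: a value coming back from userspace is only dereferenced after it is
found on the driver's own connection list, exactly as the hunk does with
ig.connlist. A compressed sketch of that pattern, with hypothetical example_*
names and globals:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_conn {
            struct list_head list;
    };

    static LIST_HEAD(example_connlist);
    static DEFINE_MUTEX(example_connlist_mutex);

    static struct example_conn *example_conn_lookup(uint64_t ep_handle)
    {
            struct example_conn *c;
            struct example_conn *want = (void *)(unsigned long)ep_handle;

            mutex_lock(&example_connlist_mutex);
            list_for_each_entry(c, &example_connlist, list) {
                    if (c == want) {
                            mutex_unlock(&example_connlist_mutex);
                            return c;
                    }
            }
            mutex_unlock(&example_connlist_mutex);

            return NULL;    /* stale or forged handle */
    }
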
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 81a8262..a8c1b30 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -36,6 +36,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iscsi_iser.h 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #ifndef __ISCSI_ISER_H__
+ #define __ISCSI_ISER_H__
+@@ -94,6 +96,7 @@
+ /* support upto 512KB in one RDMA */
+ #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
+ #define ISCSI_ISER_MAX_LUN 256
++#define ISCSI_ISER_MAX_CMD_LEN 16
+
+ /* QP settings */
+ /* Maximal bounds on received asynchronous PDUs */
+@@ -171,8 +174,7 @@ struct iser_data_buf {
+ /* fwd declarations */
+ struct iser_device;
+ struct iscsi_iser_conn;
+-struct iscsi_iser_task;
+-struct iscsi_endpoint;
++struct iscsi_iser_cmd_task;
+
+ struct iser_mem_reg {
+ u32 lkey;
+@@ -196,7 +198,7 @@ struct iser_regd_buf {
+ #define MAX_REGD_BUF_VECTOR_LEN 2
+
+ struct iser_dto {
+- struct iscsi_iser_task *task;
++ struct iscsi_iser_cmd_task *ctask;
+ struct iser_conn *ib_conn;
+ int notify_enable;
+
+@@ -240,9 +242,7 @@ struct iser_device {
+
+ struct iser_conn {
+ struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */
+- struct iscsi_endpoint *ep;
+ enum iser_ib_conn_state state; /* rdma connection state */
+- atomic_t refcount;
+ spinlock_t lock; /* used for state changes */
+ struct iser_device *device; /* device context */
+ struct rdma_cm_id *cma_id; /* CMA ID */
+@@ -261,9 +261,11 @@ struct iser_conn {
+ struct iscsi_iser_conn {
+ struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */
+ struct iser_conn *ib_conn; /* iSER IB conn */
++
++ rwlock_t lock;
+ };
+
+-struct iscsi_iser_task {
++struct iscsi_iser_cmd_task {
+ struct iser_desc desc;
+ struct iscsi_iser_conn *iser_conn;
+ enum iser_task_status status;
+@@ -296,26 +298,22 @@ extern int iser_debug_level;
+ /* allocate connection resources needed for rdma functionality */
+ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn);
+
+-int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_control(struct iscsi_conn *conn,
++ struct iscsi_mgmt_task *mtask);
+
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task);
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask);
+
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
+- struct iscsi_data *hdr);
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
++ struct iscsi_data *hdr);
+
+ void iscsi_iser_recv(struct iscsi_conn *conn,
+ struct iscsi_hdr *hdr,
+ char *rx_data,
+ int rx_data_len);
+
+-void iser_conn_init(struct iser_conn *ib_conn);
+-
+-void iser_conn_get(struct iser_conn *ib_conn);
+-
+-void iser_conn_put(struct iser_conn *ib_conn);
++int iser_conn_init(struct iser_conn **ib_conn);
+
+ void iser_conn_terminate(struct iser_conn *ib_conn);
+
+@@ -324,9 +322,9 @@ void iser_rcv_completion(struct iser_desc *desc,
+
+ void iser_snd_completion(struct iser_desc *desc);
+
+-void iser_task_rdma_init(struct iscsi_iser_task *task);
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *ctask);
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *task);
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *ctask);
+
+ void iser_dto_buffs_release(struct iser_dto *dto);
+
+@@ -336,10 +334,10 @@ void iser_reg_single(struct iser_device *device,
+ struct iser_regd_buf *regd_buf,
+ enum dma_data_direction direction);
+
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *ctask,
+ enum iser_data_dir cmd_dir);
+
+ int iser_connect(struct iser_conn *ib_conn,
+@@ -359,10 +357,10 @@ int iser_post_send(struct iser_desc *tx_desc);
+ int iser_conn_state_comp(struct iser_conn *ib_conn,
+ enum iser_ib_conn_state comp);
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir);
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask);
+ #endif
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index cdd2831..08dc81c 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_initiator.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -64,46 +66,46 @@ static void iser_dto_add_regd_buff(struct iser_dto *dto,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * iser_task->data[ISER_DIR_IN].data_len
++ * iser_ctask->data[ISER_DIR_IN].data_len
+ */
+-static int iser_prepare_read_cmd(struct iscsi_task *task,
++static int iser_prepare_read_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int edtl)
+
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_in = &iser_ctask->data[ISER_DIR_IN];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_in,
+ ISER_DIR_IN,
+ DMA_FROM_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_IN].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: "
+ "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_IN].data_len, edtl,
+- task->itt, iser_task->iser_conn);
++ iser_ctask->data[ISER_DIR_IN].data_len, edtl,
++ ctask->itt, iser_ctask->iser_conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_IN);
+ if (err) {
+ iser_err("Failed to set up Data-IN RDMA\n");
+ return err;
+ }
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_IN];
+
+ hdr->flags |= ISER_RSV;
+ hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
+ hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+
+ iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va);
+
+ return 0;
+@@ -111,43 +113,43 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
+
+ /* Register user buffer memory and initialize passive rdma
+ * dto descriptor. Total data size is stored in
+- * task->data[ISER_DIR_OUT].data_len
++ * ctask->data[ISER_DIR_OUT].data_len
+ */
+ static int
+-iser_prepare_write_cmd(struct iscsi_task *task,
++iser_prepare_write_cmd(struct iscsi_cmd_task *ctask,
+ unsigned int imm_sz,
+ unsigned int unsol_sz,
+ unsigned int edtl)
+ {
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_regd_buf *regd_buf;
+ int err;
+- struct iser_dto *send_dto = &iser_task->desc.dto;
+- struct iser_hdr *hdr = &iser_task->desc.iser_header;
+- struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
++ struct iser_dto *send_dto = &iser_ctask->desc.dto;
++ struct iser_hdr *hdr = &iser_ctask->desc.iser_header;
++ struct iser_data_buf *buf_out = &iser_ctask->data[ISER_DIR_OUT];
+
+- err = iser_dma_map_task_data(iser_task,
++ err = iser_dma_map_task_data(iser_ctask,
+ buf_out,
+ ISER_DIR_OUT,
+ DMA_TO_DEVICE);
+ if (err)
+ return err;
+
+- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (edtl > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Total data length: %ld, less than EDTL: %d, "
+ "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
+- iser_task->data[ISER_DIR_OUT].data_len,
+- edtl, task->itt, task->conn);
++ iser_ctask->data[ISER_DIR_OUT].data_len,
++ edtl, ctask->itt, ctask->conn);
+ return -EINVAL;
+ }
+
+- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
++ err = iser_reg_rdma_mem(iser_ctask,ISER_DIR_OUT);
+ if (err != 0) {
+ iser_err("Failed to register write cmd RDMA mem\n");
+ return err;
+ }
+
+- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
++ regd_buf = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+
+ if (unsol_sz < edtl) {
+ hdr->flags |= ISER_WSV;
+@@ -156,13 +158,13 @@ iser_prepare_write_cmd(struct iscsi_task *task,
+
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
+ "VA:%#llX + unsol:%d\n",
+- task->itt, regd_buf->reg.rkey,
++ ctask->itt, regd_buf->reg.rkey,
+ (unsigned long long)regd_buf->reg.va, unsol_sz);
+ }
+
+ if (imm_sz > 0) {
+ iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
+- task->itt, imm_sz);
++ ctask->itt, imm_sz);
+ iser_dto_add_regd_buff(send_dto,
+ regd_buf,
+ 0,
+@@ -314,38 +316,38 @@ iser_check_xmit(struct iscsi_conn *conn, void *task)
+ /**
+ * iser_send_command - send command PDU
+ */
+-int iser_send_command(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++int iser_send_command(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long edtl;
+ int err = 0;
+ struct iser_data_buf *data_buf;
+
+- struct iscsi_cmd *hdr = task->hdr;
+- struct scsi_cmnd *sc = task->sc;
++ struct iscsi_cmd *hdr = ctask->hdr;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
+ iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
+ return -EPERM;
+ }
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ edtl = ntohl(hdr->data_length);
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
+- send_dto = &iser_task->desc.dto;
+- send_dto->task = iser_task;
+- iser_create_send_desc(iser_conn, &iser_task->desc);
++ iser_ctask->desc.type = ISCSI_TX_SCSI_COMMAND;
++ send_dto = &iser_ctask->desc.dto;
++ send_dto->ctask = iser_ctask;
++ iser_create_send_desc(iser_conn, &iser_ctask->desc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
+- data_buf = &iser_task->data[ISER_DIR_IN];
++ data_buf = &iser_ctask->data[ISER_DIR_IN];
+ else
+- data_buf = &iser_task->data[ISER_DIR_OUT];
++ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+ if (scsi_sg_count(sc)) { /* using a scatter list */
+ data_buf->buf = scsi_sglist(sc);
+@@ -355,15 +357,15 @@ int iser_send_command(struct iscsi_conn *conn,
+ data_buf->data_len = scsi_bufflen(sc);
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+- err = iser_prepare_read_cmd(task, edtl);
++ err = iser_prepare_read_cmd(ctask, edtl);
+ if (err)
+ goto send_command_error;
+ }
+ if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
+- err = iser_prepare_write_cmd(task,
+- task->imm_count,
+- task->imm_count +
+- task->unsol_count,
++ err = iser_prepare_write_cmd(ctask,
++ ctask->imm_count,
++ ctask->imm_count +
++ ctask->unsol_count,
+ edtl);
+ if (err)
+ goto send_command_error;
+@@ -378,27 +380,27 @@ int iser_send_command(struct iscsi_conn *conn,
+ goto send_command_error;
+ }
+
+- iser_task->status = ISER_TASK_STATUS_STARTED;
++ iser_ctask->status = ISER_TASK_STATUS_STARTED;
+
+- err = iser_post_send(&iser_task->desc);
++ err = iser_post_send(&iser_ctask->desc);
+ if (!err)
+ return 0;
+
+ send_command_error:
+ iser_dto_buffs_release(send_dto);
+- iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
++ iser_err("conn %p failed ctask->itt %d err %d\n",conn, ctask->itt, err);
+ return err;
+ }
+
+ /**
+ * iser_send_data_out - send data out PDU
+ */
+-int iser_send_data_out(struct iscsi_conn *conn,
+- struct iscsi_task *task,
++int iser_send_data_out(struct iscsi_conn *conn,
++ struct iscsi_cmd_task *ctask,
+ struct iscsi_data *hdr)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
++ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ struct iser_desc *tx_desc = NULL;
+ struct iser_dto *send_dto = NULL;
+ unsigned long buf_offset;
+@@ -411,7 +413,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn, ctask))
+ return -ENOBUFS;
+
+ itt = (__force uint32_t)hdr->itt;
+@@ -432,7 +434,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ send_dto = &tx_desc->dto;
+- send_dto->task = iser_task;
++ send_dto->ctask = iser_ctask;
+ iser_create_send_desc(iser_conn, tx_desc);
+
+ iser_reg_single(iser_conn->ib_conn->device,
+@@ -440,15 +442,15 @@ int iser_send_data_out(struct iscsi_conn *conn,
+
+ /* all data was registered for RDMA, we can use the lkey */
+ iser_dto_add_regd_buff(send_dto,
+- &iser_task->rdma_regd[ISER_DIR_OUT],
++ &iser_ctask->rdma_regd[ISER_DIR_OUT],
+ buf_offset,
+ data_seg_len);
+
+- if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
++ if (buf_offset + data_seg_len > iser_ctask->data[ISER_DIR_OUT].data_len) {
+ iser_err("Offset:%ld & DSL:%ld in Data-Out "
+ "inconsistent with total len:%ld, itt:%d\n",
+ buf_offset, data_seg_len,
+- iser_task->data[ISER_DIR_OUT].data_len, itt);
++ iser_ctask->data[ISER_DIR_OUT].data_len, itt);
+ err = -EINVAL;
+ goto send_data_out_error;
+ }
+@@ -468,11 +470,10 @@ send_data_out_error:
+ }
+
+ int iser_send_control(struct iscsi_conn *conn,
+- struct iscsi_task *task)
++ struct iscsi_mgmt_task *mtask)
+ {
+ struct iscsi_iser_conn *iser_conn = conn->dd_data;
+- struct iscsi_iser_task *iser_task = task->dd_data;
+- struct iser_desc *mdesc = &iser_task->desc;
++ struct iser_desc *mdesc = mtask->dd_data;
+ struct iser_dto *send_dto = NULL;
+ unsigned long data_seg_len;
+ int err = 0;
+@@ -484,27 +485,27 @@ int iser_send_control(struct iscsi_conn *conn,
+ return -EPERM;
+ }
+
+- if (iser_check_xmit(conn, task))
++ if (iser_check_xmit(conn,mtask))
+ return -ENOBUFS;
+
+ /* build the tx desc regd header and add it to the tx desc dto */
+ mdesc->type = ISCSI_TX_CONTROL;
+ send_dto = &mdesc->dto;
+- send_dto->task = NULL;
++ send_dto->ctask = NULL;
+ iser_create_send_desc(iser_conn, mdesc);
+
+ device = iser_conn->ib_conn->device;
+
+ iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
+
+- data_seg_len = ntoh24(task->hdr->dlength);
++ data_seg_len = ntoh24(mtask->hdr->dlength);
+
+ if (data_seg_len > 0) {
+ regd_buf = &mdesc->data_regd_buf;
+ memset(regd_buf, 0, sizeof(struct iser_regd_buf));
+ regd_buf->device = device;
+- regd_buf->virt_addr = task->data;
+- regd_buf->data_size = task->data_count;
++ regd_buf->virt_addr = mtask->data;
++ regd_buf->data_size = mtask->data_count;
+ iser_reg_single(device, regd_buf,
+ DMA_TO_DEVICE);
+ iser_dto_add_regd_buff(send_dto, regd_buf,
+@@ -534,13 +535,15 @@ send_control_error:
+ void iser_rcv_completion(struct iser_desc *rx_desc,
+ unsigned long dto_xfer_len)
+ {
+- struct iser_dto *dto = &rx_desc->dto;
++ struct iser_dto *dto = &rx_desc->dto;
+ struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
+- struct iscsi_task *task;
+- struct iscsi_iser_task *iser_task;
++ struct iscsi_session *session = conn->iscsi_conn->session;
++ struct iscsi_cmd_task *ctask;
++ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iscsi_hdr *hdr;
+ char *rx_data = NULL;
+ int rx_data_len = 0;
++ unsigned int itt;
+ unsigned char opcode;
+
+ hdr = &rx_desc->iscsi_header;
+@@ -556,24 +559,19 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
+- spin_lock(&conn->iscsi_conn->session->lock);
+- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
+- if (task)
+- __iscsi_get_task(task);
+- spin_unlock(&conn->iscsi_conn->session->lock);
+-
+- if (!task)
++ itt = get_itt(hdr->itt); /* mask out cid and age bits */
++ if (!(itt < session->cmds_max))
+ iser_err("itt can't be matched to task!!! "
+- "conn %p opcode %d itt %d\n",
+- conn->iscsi_conn, opcode, hdr->itt);
+- else {
+- iser_task = task->dd_data;
+- iser_dbg("itt %d task %p\n",hdr->itt, task);
+- iser_task->status = ISER_TASK_STATUS_COMPLETED;
+- iser_task_rdma_finalize(iser_task);
+- iscsi_put_task(task);
+- }
++ "conn %p opcode %d cmds_max %d itt %d\n",
++ conn->iscsi_conn,opcode,session->cmds_max,itt);
++ /* use the mapping given with the cmds array indexed by itt */
++ ctask = (struct iscsi_cmd_task *)session->cmds[itt];
++ iser_ctask = ctask->dd_data;
++ iser_dbg("itt %d ctask %p\n",itt,ctask);
++ iser_ctask->status = ISER_TASK_STATUS_COMPLETED;
++ iser_ctask_rdma_finalize(iser_ctask);
+ }
++
+ iser_dto_buffs_release(dto);
+
+ iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+@@ -594,7 +592,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iser_conn *ib_conn = dto->ib_conn;
+ struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
+ struct iscsi_conn *conn = iser_conn->iscsi_conn;
+- struct iscsi_task *task;
++ struct iscsi_mgmt_task *mtask;
+ int resume_tx = 0;
+
+ iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
+@@ -617,31 +615,36 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+
+ if (tx_desc->type == ISCSI_TX_CONTROL) {
+ /* this arithmetic is legal by libiscsi dd_data allocation */
+- task = (void *) ((long)(void *)tx_desc -
+- sizeof(struct iscsi_task));
+- if (task->hdr->itt == RESERVED_ITT)
+- iscsi_put_task(task);
++ mtask = (void *) ((long)(void *)tx_desc -
++ sizeof(struct iscsi_mgmt_task));
++ if (mtask->hdr->itt == RESERVED_ITT) {
++ struct iscsi_session *session = conn->session;
++
++ spin_lock(&conn->session->lock);
++ iscsi_free_mgmt_task(conn, mtask);
++ spin_unlock(&session->lock);
++ }
+ }
+ }
+
+-void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_init(struct iscsi_iser_cmd_task *iser_ctask)
+
+ {
+- iser_task->status = ISER_TASK_STATUS_INIT;
++ iser_ctask->status = ISER_TASK_STATUS_INIT;
+
+- iser_task->dir[ISER_DIR_IN] = 0;
+- iser_task->dir[ISER_DIR_OUT] = 0;
++ iser_ctask->dir[ISER_DIR_IN] = 0;
++ iser_ctask->dir[ISER_DIR_OUT] = 0;
+
+- iser_task->data[ISER_DIR_IN].data_len = 0;
+- iser_task->data[ISER_DIR_OUT].data_len = 0;
++ iser_ctask->data[ISER_DIR_IN].data_len = 0;
++ iser_ctask->data[ISER_DIR_OUT].data_len = 0;
+
+- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_IN], 0,
+ sizeof(struct iser_regd_buf));
+- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
++ memset(&iser_ctask->rdma_regd[ISER_DIR_OUT], 0,
+ sizeof(struct iser_regd_buf));
+ }
+
+-void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
++void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ int deferred;
+ int is_rdma_aligned = 1;
+@@ -650,17 +653,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ /* if we were reading, copy back to unaligned sglist,
+ * anyway dma_unmap and free the copy
+ */
+- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_IN);
+ }
+- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
++ if (iser_ctask->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ is_rdma_aligned = 0;
+- iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
++ iser_finalize_rdma_unaligned_sg(iser_ctask, ISER_DIR_OUT);
+ }
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-IN rdma reg\n",
+@@ -668,8 +671,8 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+ }
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+ deferred = iser_regd_buff_release(regd);
+ if (deferred) {
+ iser_err("%d references remain for BUF-OUT rdma reg\n",
+@@ -679,7 +682,7 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
+
+ /* if the data was unaligned, it was already unmapped and then copied */
+ if (is_rdma_aligned)
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+ }
+
+ void iser_dto_buffs_release(struct iser_dto *dto)
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index b9453d0..cac50c4 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -28,6 +28,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
+ */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+@@ -99,13 +101,13 @@ void iser_reg_single(struct iser_device *device,
+ /**
+ * iser_start_rdma_unaligned_sg
+ */
+-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ int dma_nents;
+ struct ib_device *dev;
+ char *mem = NULL;
+- struct iser_data_buf *data = &iser_task->data[cmd_dir];
++ struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
+ unsigned long cmd_data_len = data->data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+@@ -138,37 +140,37 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
+- iser_task->data_copy[cmd_dir].buf =
+- &iser_task->data_copy[cmd_dir].sg_single;
+- iser_task->data_copy[cmd_dir].size = 1;
++ sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
++ iser_ctask->data_copy[cmd_dir].buf =
++ &iser_ctask->data_copy[cmd_dir].sg_single;
++ iser_ctask->data_copy[cmd_dir].size = 1;
+
+- iser_task->data_copy[cmd_dir].copy_buf = mem;
++ iser_ctask->data_copy[cmd_dir].copy_buf = mem;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+ dma_nents = ib_dma_map_sg(dev,
+- &iser_task->data_copy[cmd_dir].sg_single,
++ &iser_ctask->data_copy[cmd_dir].sg_single,
+ 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ BUG_ON(dma_nents == 0);
+
+- iser_task->data_copy[cmd_dir].dma_nents = dma_nents;
++ iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
+ return 0;
+ }
+
+ /**
+ * iser_finalize_rdma_unaligned_sg
+ */
+-void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
++void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *mem_copy;
+ unsigned long cmd_data_len;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
+- mem_copy = &iser_task->data_copy[cmd_dir];
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
++ mem_copy = &iser_ctask->data_copy[cmd_dir];
+
+ ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
+ (cmd_dir == ISER_DIR_OUT) ?
+@@ -184,8 +186,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ /* copy back read RDMA to unaligned sg */
+ mem = mem_copy->copy_buf;
+
+- sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf;
+- sg_size = iser_task->data[ISER_DIR_IN].size;
++ sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
++ sg_size = iser_ctask->data[ISER_DIR_IN].size;
+
+ p = mem;
+ for_each_sg(sgl, sg, sg_size, i) {
+@@ -198,7 +200,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ }
+ }
+
+- cmd_data_len = iser_task->data[cmd_dir].data_len;
++ cmd_data_len = iser_ctask->data[cmd_dir].data_len;
+
+ if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
+ free_pages((unsigned long)mem_copy->copy_buf,
+@@ -376,15 +378,15 @@ static void iser_page_vec_build(struct iser_data_buf *data,
+ }
+ }
+
+-int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+- struct iser_data_buf *data,
+- enum iser_data_dir iser_dir,
+- enum dma_data_direction dma_dir)
++int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
++ struct iser_data_buf *data,
++ enum iser_data_dir iser_dir,
++ enum dma_data_direction dma_dir)
+ {
+ struct ib_device *dev;
+
+- iser_task->dir[iser_dir] = 1;
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ iser_ctask->dir[iser_dir] = 1;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+ data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ if (data->dma_nents == 0) {
+@@ -394,20 +396,20 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
+ return 0;
+ }
+
+-void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
++void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
+ {
+ struct ib_device *dev;
+ struct iser_data_buf *data;
+
+- dev = iser_task->iser_conn->ib_conn->device->ib_device;
++ dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
+
+- if (iser_task->dir[ISER_DIR_IN]) {
+- data = &iser_task->data[ISER_DIR_IN];
++ if (iser_ctask->dir[ISER_DIR_IN]) {
++ data = &iser_ctask->data[ISER_DIR_IN];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+ }
+
+- if (iser_task->dir[ISER_DIR_OUT]) {
+- data = &iser_task->data[ISER_DIR_OUT];
++ if (iser_ctask->dir[ISER_DIR_OUT]) {
++ data = &iser_ctask->data[ISER_DIR_OUT];
+ ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
+ }
+ }
+@@ -418,21 +420,21 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
+ *
+ * returns 0 on success, errno code on failure
+ */
+-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
++int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
+ enum iser_data_dir cmd_dir)
+ {
+- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+- struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
++ struct iscsi_conn *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
++ struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+- struct iser_data_buf *mem = &iser_task->data[cmd_dir];
++ struct iser_data_buf *mem = &iser_ctask->data[cmd_dir];
+ struct iser_regd_buf *regd_buf;
+ int aligned_len;
+ int err;
+ int i;
+ struct scatterlist *sg;
+
+- regd_buf = &iser_task->rdma_regd[cmd_dir];
++ regd_buf = &iser_ctask->rdma_regd[cmd_dir];
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+@@ -442,13 +444,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+- iser_dma_unmap_task_data(iser_task);
++ iser_dma_unmap_task_data(iser_ctask);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
++ if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
+ return -ENOMEM;
+- mem = &iser_task->data_copy[cmd_dir];
++ mem = &iser_ctask->data_copy[cmd_dir];
+ }
+
+ /* if there a single dma entry, FMR is not needed */
+@@ -472,9 +474,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
+ err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ if (err) {
+ iser_data_buf_dump(mem, ibdev);
+- iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
+- mem->dma_nents,
+- ntoh24(iser_task->desc.iscsi_header.dlength));
++ iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents,
++ ntoh24(iser_ctask->desc.iscsi_header.dlength));
+ iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+ ib_conn->page_vec->data_size, ib_conn->page_vec->length,
+ ib_conn->page_vec->offset);
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 63462ec..d19cfe6 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -29,6 +29,8 @@
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
++ *
++ * $Id: iser_verbs.c 7051 2006-05-10 12:29:11Z ogerlitz $
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -323,18 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
+ iser_device_try_release(device);
+ if (ib_conn->iser_conn)
+ ib_conn->iser_conn->ib_conn = NULL;
+- iscsi_destroy_endpoint(ib_conn->ep);
+-}
+-
+-void iser_conn_get(struct iser_conn *ib_conn)
+-{
+- atomic_inc(&ib_conn->refcount);
+-}
+-
+-void iser_conn_put(struct iser_conn *ib_conn)
+-{
+- if (atomic_dec_and_test(&ib_conn->refcount))
+- iser_conn_release(ib_conn);
++ kfree(ib_conn);
+ }
+
+ /**
+@@ -358,7 +349,7 @@ void iser_conn_terminate(struct iser_conn *ib_conn)
+ wait_event_interruptible(ib_conn->wait,
+ ib_conn->state == ISER_CONN_DOWN);
+
+- iser_conn_put(ib_conn);
++ iser_conn_release(ib_conn);
+ }
+
+ static void iser_connect_error(struct rdma_cm_id *cma_id)
+@@ -483,7 +474,6 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ break;
+ case RDMA_CM_EVENT_DISCONNECTED:
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+- case RDMA_CM_EVENT_ADDR_CHANGE:
+ iser_disconnected_handler(cma_id);
+ break;
+ default:
+@@ -493,15 +483,24 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
+ return ret;
+ }
+
+-void iser_conn_init(struct iser_conn *ib_conn)
++int iser_conn_init(struct iser_conn **ibconn)
+ {
++ struct iser_conn *ib_conn;
++
++ ib_conn = kzalloc(sizeof *ib_conn, GFP_KERNEL);
++ if (!ib_conn) {
++ iser_err("can't alloc memory for struct iser_conn\n");
++ return -ENOMEM;
++ }
+ ib_conn->state = ISER_CONN_INIT;
+ init_waitqueue_head(&ib_conn->wait);
+ atomic_set(&ib_conn->post_recv_buf_count, 0);
+ atomic_set(&ib_conn->post_send_buf_count, 0);
+- atomic_set(&ib_conn->refcount, 1);
+ INIT_LIST_HEAD(&ib_conn->conn_list);
+ spin_lock_init(&ib_conn->lock);
++
++ *ibconn = ib_conn;
++ return 0;
+ }
+
+ /**
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch b/kernel_patches/backport/2.6.9_U7/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
new file mode 100644
index 0000000..101fdc6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_01_revert_da9c0c770e775e655e3f77c96d91ee557b117adb.patch
@@ -0,0 +1,44 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..75ecabe 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -211,10 +211,10 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ int error = 0;
+
+ if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(scsi_bufflen(ctask->sc) == 0);
++ BUG_ON(ctask->sc->request_bufflen == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, scsi_bufflen(ctask->sc),
++ ctask->itt, ctask->sc->request_bufflen,
+ ctask->imm_count, ctask->unsol_count);
+ }
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 5d62cc0..1ae80d8 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -349,12 +349,18 @@ int iser_send_command(struct iscsi_conn *conn,
+ else
+ data_buf = &iser_ctask->data[ISER_DIR_OUT];
+
+- if (scsi_sg_count(sc)) { /* using a scatter list */
+- data_buf->buf = scsi_sglist(sc);
+- data_buf->size = scsi_sg_count(sc);
++ if (sc->use_sg) { /* using a scatter list */
++ data_buf->buf = sc->request_buffer;
++ data_buf->size = sc->use_sg;
++ } else if (sc->request_bufflen) {
++ /* using a single buffer - convert it into one entry SG */
++ sg_init_one(&data_buf->sg_single,
++ sc->request_buffer, sc->request_bufflen);
++ data_buf->buf = &data_buf->sg_single;
++ data_buf->size = 1;
+ }
+
+- data_buf->data_len = scsi_bufflen(sc);
++ data_buf->data_len = sc->request_bufflen;
+
+ if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ err = iser_prepare_read_cmd(ctask, edtl);
diff --git a/kernel_patches/backport/2.6.9_U7/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch b/kernel_patches/backport/2.6.9_U7/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
new file mode 100644
index 0000000..7b21cba
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_02_revert_d8196ed2181b4595eaf464a5bcbddb6c28649a39.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..933429b 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -586,7 +586,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+ ISCSI_PING_TMO | ISCSI_RECV_TMO,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_NETDEV_NAME |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
diff --git a/kernel_patches/backport/2.6.9_U7/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch b/kernel_patches/backport/2.6.9_U7/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
new file mode 100644
index 0000000..d72eb5a
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_03_revert_1548271ece9e9312fd5feb41fd58773b56a71d39.patch
@@ -0,0 +1,74 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index bad8dac..7baac99 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -368,8 +368,7 @@ static struct iscsi_transport iscsi_iser_transport;
+ static struct iscsi_cls_session *
+ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct scsi_transport_template *scsit,
+- uint16_t cmds_max, uint16_t qdepth,
+- uint32_t initial_cmdsn, uint32_t *hostno)
++ uint32_t initial_cmdsn, uint32_t *hostno)
+ {
+ struct iscsi_cls_session *cls_session;
+ struct iscsi_session *session;
+@@ -380,13 +380,7 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ struct iscsi_iser_cmd_task *iser_ctask;
+ struct iser_desc *desc;
+
+- /*
+- * we do not support setting can_queue cmd_per_lun from userspace yet
+- * because we preallocate so many resources
+- */
+ cls_session = iscsi_session_setup(iscsit, scsit,
+- ISCSI_DEF_XMIT_CMDS_MAX,
+- ISCSI_MAX_CMD_PER_LUN,
+ sizeof(struct iscsi_iser_cmd_task),
+ sizeof(struct iser_desc),
+ initial_cmdsn, &hn);
+@@ -550,7 +550,7 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+ .change_queue_depth = iscsi_change_queue_depth,
+- .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
++ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 1ee867b..671faff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -105,7 +105,7 @@
+ #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
+ * SCSI_TMFUNC(2), LOGOUT(1) */
+
+-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
++#define ISER_QP_MAX_RECV_DTOS (ISCSI_XMIT_CMDS_MAX + \
+ ISER_MAX_RX_MISC_PDUS + \
+ ISER_MAX_TX_MISC_PDUS)
+
+@@ -117,7 +117,7 @@
+
+ #define ISER_INFLIGHT_DATAOUTS 8
+
+-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
++#define ISER_QP_MAX_REQ_DTOS (ISCSI_XMIT_CMDS_MAX * \
+ (1 + ISER_INFLIGHT_DATAOUTS) + \
+ ISER_MAX_TX_MISC_PDUS + \
+ ISER_MAX_RX_MISC_PDUS)
+diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
+index 654a4dc..f3d8ba5 100644
+--- a/drivers/infiniband/ulp/iser/iser_verbs.c
++++ b/drivers/infiniband/ulp/iser/iser_verbs.c
+@@ -154,8 +154,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
+ /* make the pool size twice the max number of SCSI commands *
+ * the ML is expected to queue, watermark for unmap at 50% */
+- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
+- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
++ params.pool_size = ISCSI_XMIT_CMDS_MAX * 2;
++ params.dirty_watermark = ISCSI_XMIT_CMDS_MAX;
+ params.cache = 0;
+ params.flush_function = NULL;
+ params.access = (IB_ACCESS_LOCAL_WRITE |
diff --git a/kernel_patches/backport/2.6.9_U7/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch b/kernel_patches/backport/2.6.9_U7/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
new file mode 100644
index 0000000..26fa09c
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_04_revert_77a23c21aaa723f6b0ffc4a701be8c8e5a32346d.patch
@@ -0,0 +1,38 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 8f7b859..5f82d6c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -134,9 +134,18 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ {
+ struct iscsi_iser_conn *iser_conn = ctask->conn->dd_data;
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
++ struct scsi_cmnd *sc = ctask->sc;
+
+ iser_ctask->command_sent = 0;
+ iser_ctask->iser_conn = iser_conn;
++ if (sc->sc_data_direction == DMA_TO_DEVICE) {
++ BUG_ON(sc->request_bufflen == 0);
++
++ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
++ ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->unsol_count);
++ }
++
+ iser_ctask_rdma_init(iser_ctask);
+ return 0;
+ }
+@@ -210,14 +219,6 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
+ struct iscsi_iser_cmd_task *iser_ctask = ctask->dd_data;
+ int error = 0;
+
+- if (ctask->sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(ctask->sc->request_bufflen == 0);
+-
+- debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, ctask->sc->request_bufflen,
+- ctask->imm_count, ctask->unsol_count);
+- }
+-
+ debug_scsi("ctask deq [cid %d itt 0x%x]\n",
+ conn->id, ctask->itt);
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch b/kernel_patches/backport/2.6.9_U7/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
new file mode 100644
index 0000000..417415f
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_05_revert_b2c6416736b847b91950bd43cc5153e11a1f83ee.patch
@@ -0,0 +1,18 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 5f82d6c..3a67d76 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -574,11 +574,8 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_EXP_STATSN |
+ ISCSI_PERSISTENT_PORT |
+ ISCSI_PERSISTENT_ADDRESS |
+- ISCSI_TARGET_NAME | ISCSI_TPGT |
+- ISCSI_USERNAME | ISCSI_PASSWORD |
+- ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+- ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+- ISCSI_PING_TMO | ISCSI_RECV_TMO,
++ ISCSI_TARGET_NAME |
++ ISCSI_TPGT,
+ .host_param_mask = ISCSI_HOST_HWADDRESS |
+ ISCSI_HOST_INITIATOR_NAME,
+ .host_template = &iscsi_iser_sht,
diff --git a/kernel_patches/backport/2.6.9_U7/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch b/kernel_patches/backport/2.6.9_U7/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
new file mode 100644
index 0000000..0b1a4c4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_06_revert_857ae0bdb72999936a28ce621e38e2e288c485da.patch
@@ -0,0 +1,16 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index c5941fa..2f4f125 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -140,8 +140,8 @@ iscsi_iser_cmd_init(struct iscsi_cmd_task *ctask)
+ iser_ctask->iser_conn = iser_conn;
+ if (sc->sc_data_direction == DMA_TO_DEVICE) {
+- BUG_ON(sc->request_bufflen == 0);
++ BUG_ON(ctask->total_length == 0);
+
+ debug_scsi("cmd [itt %x total %d imm %d unsol_data %d\n",
+- ctask->itt, sc->request_bufflen, ctask->imm_count,
++ ctask->itt, ctask->total_length, ctask->imm_count,
+ ctask->unsol_count);
+ }
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch b/kernel_patches/backport/2.6.9_U7/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
new file mode 100644
index 0000000..f207af3
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_07_revert_8ad5781ae9702a8f95cfdf30967752e4297613ee.patch
@@ -0,0 +1,14 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 2f4f125..940bf98 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,8 +576,7 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS |
+- ISCSI_HOST_INITIATOR_NAME,
++ .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
diff --git a/kernel_patches/backport/2.6.9_U7/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch b/kernel_patches/backport/2.6.9_U7/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
new file mode 100644
index 0000000..f9dceb1
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_08_revert_0801c242a33426fddc005c2f559a3d2fa6fca7eb.patch
@@ -0,0 +1,22 @@
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 940bf98..6a35eff 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -576,7 +576,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ ISCSI_PERSISTENT_ADDRESS |
+ ISCSI_TARGET_NAME |
+ ISCSI_TPGT,
+- .host_param_mask = ISCSI_HOST_HWADDRESS,
+ .host_template = &iscsi_iser_sht,
+ .conndata_size = sizeof(struct iscsi_conn),
+ .max_lun = ISCSI_ISER_MAX_LUN,
+@@ -593,9 +593,6 @@ static struct iscsi_transport iscsi_iser_transport = {
+ .get_session_param = iscsi_session_get_param,
+ .start_conn = iscsi_iser_conn_start,
+ .stop_conn = iscsi_conn_stop,
+- /* iscsi host params */
+- .get_host_param = iscsi_host_get_param,
+- .set_host_param = iscsi_host_set_param,
+ /* IO */
+ .send_pdu = iscsi_conn_send_pdu,
+ .get_stats = iscsi_iser_conn_get_stats,
diff --git a/kernel_patches/backport/2.6.9_U7/iser_09_fix_inclusion_order.patch b/kernel_patches/backport/2.6.9_U7/iser_09_fix_inclusion_order.patch
new file mode 100644
index 0000000..3c2a969
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_09_fix_inclusion_order.patch
@@ -0,0 +1,13 @@
+--- linux-2.6.20-rc7-orig/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:13:43.000000000 +0200
++++ linux-2.6.20-rc7/drivers/infiniband/ulp/iser/iscsi_iser.c 2007-02-08 09:14:31.000000000 +0200
+@@ -70,9 +70,8 @@
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi_host.h>
+ #include <scsi/scsi.h>
+-#include <scsi/scsi_transport_iscsi.h>
+-
+ #include "iscsi_iser.h"
++#include <scsi/scsi_transport_iscsi.h>
+
+ static unsigned int iscsi_max_lun = 512;
+ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
diff --git a/kernel_patches/backport/2.6.9_U7/iser_10_fix_struct_scsi_host_template.patch b/kernel_patches/backport/2.6.9_U7/iser_10_fix_struct_scsi_host_template.patch
new file mode 100644
index 0000000..5b28ac4
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_10_fix_struct_scsi_host_template.patch
@@ -0,0 +1,31 @@
+From 828e0ad429b92cf75781770ceb9ef7086f34fde2 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:31:42 +0300
+Subject: [PATCH] fix_struct_scsi_host_template
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 2 --
+ 1 files changed, 0 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 9bf24c6..de1e783 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -542,13 +542,11 @@ static struct scsi_host_template iscsi_iser_sht = {
+ .module = THIS_MODULE,
+ .name = "iSCSI Initiator over iSER, v." DRV_VER,
+ .queuecommand = iscsi_queuecommand,
+- .change_queue_depth = iscsi_change_queue_depth,
+ .can_queue = ISCSI_XMIT_CMDS_MAX - 1,
+ .sg_tablesize = ISCSI_ISER_SG_TABLESIZE,
+ .max_sectors = 1024,
+ .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN,
+ .eh_abort_handler = iscsi_eh_abort,
+- .eh_device_reset_handler= iscsi_eh_device_reset,
+ .eh_host_reset_handler = iscsi_eh_host_reset,
+ .use_clustering = DISABLE_CLUSTERING,
+ .proc_name = "iscsi_iser",
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_11_add_fmr_unalign_cnt.patch b/kernel_patches/backport/2.6.9_U7/iser_11_add_fmr_unalign_cnt.patch
new file mode 100644
index 0000000..ef2a2d6
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_11_add_fmr_unalign_cnt.patch
@@ -0,0 +1,25 @@
+From 1255c8e5209ce19644e83e353c260f2eddc62cca Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:54:57 +0300
+Subject: [PATCH] add fmr_unalign_cnt to struct iscsi_conn
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ include/scsi/libiscsi.h | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
+index ea0816d..182421f 100644
+--- a/include/scsi/libiscsi.h
++++ b/include/scsi/libiscsi.h
+@@ -197,6 +197,7 @@ struct iscsi_conn {
+
+ /* custom statistics */
+ uint32_t eh_abort_cnt;
++ uint32_t fmr_unalign_cnt;
+ };
+
+ struct iscsi_queue {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_12_remove_hdr_max.patch b/kernel_patches/backport/2.6.9_U7/iser_12_remove_hdr_max.patch
new file mode 100644
index 0000000..c475001
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_12_remove_hdr_max.patch
@@ -0,0 +1,25 @@
+From 97672ef8a29da5e16774d1de9527b2cc29415e36 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Thu, 3 Jul 2008 14:59:16 +0300
+Subject: [PATCH] remove hdr_max
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iscsi_iser.c | 1 -
+ 1 files changed, 0 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index de1e783..6451e9d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -394,7 +394,6 @@ iscsi_iser_session_create(struct iscsi_transport *iscsit,
+ ctask = session->cmds[i];
+ iser_ctask = ctask->dd_data;
+ ctask->hdr = (struct iscsi_cmd *)&iser_ctask->desc.iscsi_header;
+- ctask->hdr_max = sizeof(iser_ctask->desc.iscsi_header);
+ }
+
+ for (i = 0; i < session->mgmtpool_max; i++) {
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_13_fix_netlink_kernel_create.patch b/kernel_patches/backport/2.6.9_U7/iser_13_fix_netlink_kernel_create.patch
new file mode 100644
index 0000000..d47df44
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_13_fix_netlink_kernel_create.patch
@@ -0,0 +1,26 @@
+From db61fe2c3062d8918e793ddc7e1a8cc3694bf620 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:20:42 +0300
+Subject: [PATCH] fix netlink_kernel_create
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/scsi/scsi_transport_iscsi.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index e969ef7..a2f4fb7 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -1401,7 +1401,7 @@ static __init int iscsi_transport_init(void)
+ if (err)
+ goto unregister_conn_class;
+
+- nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
++ nls = netlink_kernel_create(NULL, NETLINK_ISCSI, 1, iscsi_if_rx, NULL,
+ THIS_MODULE);
+ if (!nls) {
+ err = -ENOBUFS;
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_14_sync_attribute_container.c_from_ofed1.3.patch b/kernel_patches/backport/2.6.9_U7/iser_14_sync_attribute_container.c_from_ofed1.3.patch
new file mode 100644
index 0000000..e926007
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_14_sync_attribute_container.c_from_ofed1.3.patch
@@ -0,0 +1,394 @@
+From bed65721f623039a119b5ff03c6c1fe44a1ccfb3 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:26:20 +0300
+Subject: [PATCH] sync attribute_container.c from ofed1.3
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/base/attribute_container.c | 100 +++++++++++++++++------------------
+ drivers/base/transport_class.c | 21 ++++----
+ 2 files changed, 60 insertions(+), 61 deletions(-)
+
+diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
+index f57652d..7370d7c 100644
+--- a/drivers/base/attribute_container.c
++++ b/drivers/base/attribute_container.c
+@@ -27,21 +27,21 @@
+ struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+- struct device classdev;
++ struct class_device classdev;
+ };
+
+ static void internal_container_klist_get(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- get_device(&ic->classdev);
++ class_device_get(&ic->classdev);
+ }
+
+ static void internal_container_klist_put(struct klist_node *n)
+ {
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+- put_device(&ic->classdev);
++ class_device_put(&ic->classdev);
+ }
+
+
+@@ -53,7 +53,7 @@ static void internal_container_klist_put(struct klist_node *n)
+ * Returns the container associated with this classdev.
+ */
+ struct attribute_container *
+-attribute_container_classdev_to_container(struct device *classdev)
++attribute_container_classdev_to_container(struct class_device *classdev)
+ {
+ struct internal_container *ic =
+ container_of(classdev, struct internal_container, classdev);
+@@ -61,7 +61,7 @@ attribute_container_classdev_to_container(struct device *classdev)
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+-static LIST_HEAD(attribute_container_list);
++static struct list_head attribute_container_list;
+
+ static DEFINE_MUTEX(attribute_container_mutex);
+
+@@ -110,11 +110,11 @@ attribute_container_unregister(struct attribute_container *cont)
+ EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+ /* private function used as class release */
+-static void attribute_container_release(struct device *classdev)
++static void attribute_container_release(struct class_device *classdev)
+ {
+ struct internal_container *ic
+ = container_of(classdev, struct internal_container, classdev);
+- struct device *dev = classdev->parent;
++ struct device *dev = classdev->dev;
+
+ kfree(ic);
+ put_device(dev);
+@@ -129,12 +129,12 @@ static void attribute_container_release(struct device *classdev)
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container). If no
+ * fn is provided, the code will simply register the class device via
+- * device_add. If a function is provided, it is expected to add
++ * class_device_add. If a function is provided, it is expected to add
+ * the class device at the appropriate time. One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it a later time. To do this, call this routine for
+ * allocation and initialisation and then use
+- * attribute_container_device_trigger() to call device_add() on
++ * attribute_container_device_trigger() to call class_device_add() on
+ * it. Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+@@ -142,7 +142,7 @@ void
+ attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -163,11 +163,11 @@ attribute_container_add_device(struct device *dev,
+ }
+
+ ic->cont = cont;
+- device_initialize(&ic->classdev);
+- ic->classdev.parent = get_device(dev);
++ class_device_initialize(&ic->classdev);
++ ic->classdev.dev = get_device(dev);
+ ic->classdev.class = cont->class;
+- cont->class->dev_release = attribute_container_release;
+- strcpy(ic->classdev.bus_id, dev->bus_id);
++ cont->class->release = attribute_container_release;
++ strcpy(ic->classdev.class_id, dev->bus_id);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+@@ -195,19 +195,20 @@ attribute_container_add_device(struct device *dev,
+ * @fn: A function to call to remove the device
+ *
+ * This routine triggers device removal. If fn is NULL, then it is
+- * simply done via device_unregister (note that if something
++ * simply done via class_device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released). If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+- * device_del() and then use attribute_container_device_trigger()
+- * to do the final put on the classdev.
++ * class_device_del() and then use
++ * attribute_container_device_trigger() to do the final put on the
++ * classdev.
+ */
+ void
+ attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -223,14 +224,14 @@ attribute_container_remove_device(struct device *dev,
+ continue;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev != ic->classdev.parent)
++ if (dev != ic->classdev.dev)
+ continue;
+ klist_del(&ic->node);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else {
+ attribute_container_remove_attrs(&ic->classdev);
+- device_unregister(&ic->classdev);
++ class_device_unregister(&ic->classdev);
+ }
+ }
+ }
+@@ -251,7 +252,7 @@ void
+ attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+- struct device *))
++ struct class_device *))
+ {
+ struct attribute_container *cont;
+
+@@ -269,7 +270,7 @@ attribute_container_device_trigger(struct device *dev,
+ }
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (dev == ic->classdev.parent)
++ if (dev == ic->classdev.dev)
+ fn(cont, dev, &ic->classdev);
+ }
+ }
+@@ -312,23 +313,18 @@ attribute_container_trigger(struct device *dev,
+ * attributes listed in the container
+ */
+ int
+-attribute_container_add_attrs(struct device *classdev)
++attribute_container_add_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i, error;
+
+- BUG_ON(attrs && cont->grp);
+-
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return 0;
+
+- if (cont->grp)
+- return sysfs_create_group(&classdev->kobj, cont->grp);
+-
+ for (i = 0; attrs[i]; i++) {
+- error = device_create_file(classdev, attrs[i]);
++ error = class_device_create_file(classdev, attrs[i]);
+ if (error)
+ return error;
+ }
+@@ -337,18 +333,18 @@ attribute_container_add_attrs(struct device *classdev)
+ }
+
+ /**
+- * attribute_container_add_class_device - same function as device_add
++ * attribute_container_add_class_device - same function as class_device_add
+ *
+ * @classdev: the class device to add
+ *
+- * This performs essentially the same function as device_add except for
++ * This performs essentially the same function as class_device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+ int
+-attribute_container_add_class_device(struct device *classdev)
++attribute_container_add_class_device(struct class_device *classdev)
+ {
+- int error = device_add(classdev);
++ int error = class_device_add(classdev);
+ if (error)
+ return error;
+ return attribute_container_add_attrs(classdev);
+@@ -363,7 +359,7 @@ attribute_container_add_class_device(struct device *classdev)
+ int
+ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ return attribute_container_add_class_device(classdev);
+ }
+@@ -375,23 +371,18 @@ attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ *
+ */
+ void
+-attribute_container_remove_attrs(struct device *classdev)
++attribute_container_remove_attrs(struct class_device *classdev)
+ {
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+- struct device_attribute **attrs = cont->attrs;
++ struct class_device_attribute **attrs = cont->attrs;
+ int i;
+
+- if (!attrs && !cont->grp)
++ if (!attrs)
+ return;
+
+- if (cont->grp) {
+- sysfs_remove_group(&classdev->kobj, cont->grp);
+- return ;
+- }
+-
+ for (i = 0; attrs[i]; i++)
+- device_remove_file(classdev, attrs[i]);
++ class_device_remove_file(classdev, attrs[i]);
+ }
+
+ /**
+@@ -400,13 +391,13 @@ attribute_container_remove_attrs(struct device *classdev)
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+- * device_del.
++ * class_device_del.
+ */
+ void
+-attribute_container_class_device_del(struct device *classdev)
++attribute_container_class_device_del(struct class_device *classdev)
+ {
+ attribute_container_remove_attrs(classdev);
+- device_del(classdev);
++ class_device_del(classdev);
+ }
+
+ /**
+@@ -418,16 +409,16 @@ attribute_container_class_device_del(struct device *classdev)
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+-struct device *
++struct class_device *
+ attribute_container_find_class_device(struct attribute_container *cont,
+ struct device *dev)
+ {
+- struct device *cdev = NULL;
++ struct class_device *cdev = NULL;
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+- if (ic->classdev.parent == dev) {
++ if (ic->classdev.dev == dev) {
+ cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
+@@ -438,3 +429,10 @@ attribute_container_find_class_device(struct attribute_container *cont,
+ return cdev;
+ }
+ EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
++
++int __init
++attribute_container_init(void)
++{
++ INIT_LIST_HEAD(&attribute_container_list);
++ return 0;
++}
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
+index 84997ef..f25e7c6 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+ static int anon_transport_dummy_function(struct transport_container *tc,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ /* do nothing */
+ return 0;
+@@ -108,14 +108,13 @@ EXPORT_SYMBOL_GPL(anon_transport_class_register);
+ */
+ void anon_transport_class_unregister(struct anon_transport_class *atc)
+ {
+- if (unlikely(attribute_container_unregister(&atc->container)))
+- BUG();
++ attribute_container_unregister(&atc->container);
+ }
+ EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+ static int transport_setup_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -127,7 +126,9 @@ static int transport_setup_classdev(struct attribute_container *cont,
+ }
+
+ /**
+- * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
++ * transport_setup_device - declare a new dev for transport class association
++ * but don't make it visible yet.
++ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+@@ -149,7 +150,7 @@ EXPORT_SYMBOL_GPL(transport_setup_device);
+
+ static int transport_add_class_device(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ int error = attribute_container_add_class_device(classdev);
+ struct transport_container *tcont =
+@@ -181,7 +182,7 @@ EXPORT_SYMBOL_GPL(transport_add_device);
+
+ static int transport_configure(struct attribute_container *cont,
+ struct device *dev,
+- struct device *cdev)
++ struct class_device *cdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+@@ -212,7 +213,7 @@ EXPORT_SYMBOL_GPL(transport_configure_device);
+
+ static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+@@ -251,12 +252,12 @@ EXPORT_SYMBOL_GPL(transport_remove_device);
+
+ static void transport_destroy_classdev(struct attribute_container *cont,
+ struct device *dev,
+- struct device *classdev)
++ struct class_device *classdev)
+ {
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove != anon_transport_dummy_function)
+- put_device(classdev);
++ class_device_put(classdev);
+ }
+
+
+--
+1.5.3.8
+
diff --git a/kernel_patches/backport/2.6.9_U7/iser_15_fix_iscsi_free_mgmt_task.patch b/kernel_patches/backport/2.6.9_U7/iser_15_fix_iscsi_free_mgmt_task.patch
new file mode 100644
index 0000000..7a3a3ea
--- /dev/null
+++ b/kernel_patches/backport/2.6.9_U7/iser_15_fix_iscsi_free_mgmt_task.patch
@@ -0,0 +1,28 @@
+From 5a9fd2300982aca58f1306bdb98cab878998a607 Mon Sep 17 00:00:00 2001
+From: Doron Shoham <dorons at voltaire.com>
+Date: Sun, 6 Jul 2008 15:53:59 +0300
+Subject: [PATCH] fix iscsi_free_mgmt_task
+
+Signed-off-by: Doron Shoham <dorons at voltaire.com>
+---
+ drivers/infiniband/ulp/iser/iser_initiator.c | 4 +++-
+ 1 files changed, 3 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 4e20c8b..e7f2399 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -627,7 +627,9 @@ void iser_snd_completion(struct iser_desc *tx_desc)
+ struct iscsi_session *session = conn->session;
+
+ spin_lock(&conn->session->lock);
+- iscsi_free_mgmt_task(conn, mtask);
++ list_del(&mtask->running);
++ __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
++ sizeof(void*));
+ spin_unlock(&session->lock);
+ }
+ }
+--
+1.5.3.8
+
--
1.5.3.8