[ewg] [PATCH OFED-1.5] Move SLES11 backports to attic

Jon Mason jon at opengridcomputing.com
Wed Jun 3 08:47:36 PDT 2009


The SLES11 backports are legacy patches from the OFED 1.4 build, and do
not apply cleanly to the 2.6.30 kernel.  Move them to the "attic" so
they can be referenced for their historical value but no longer
interfere with the OFED 1.5 build.

Signed-off-by: Jon Mason <jon at opengridcomputing.com>
---

diff --git a/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch b/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch
new file mode 100644
index 0000000..4d3b269
--- /dev/null
+++ b/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch
@@ -0,0 +1,65 @@
+From 34a43622ec035aa41a5383c31245838472784c1b Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Sun, 31 May 2009 11:59:25 +0300
+Subject: [PATCH 1/8] mlx4_en: Don't use netdev_ops
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/en_netdev.c |   34 +++++++++++++++-------------------
+ 1 files changed, 15 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
+index a38adf5..4ad5f3c 100644
+--- a/drivers/net/mlx4/en_netdev.c
++++ b/drivers/net/mlx4/en_netdev.c
+@@ -933,24 +933,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+ 	return 0;
+ }
+ 
+-static const struct net_device_ops mlx4_netdev_ops = {
+-	.ndo_open		= mlx4_en_open,
+-	.ndo_stop		= mlx4_en_close,
+-	.ndo_start_xmit		= mlx4_en_xmit,
+-	.ndo_select_queue	= mlx4_en_select_queue,
+-	.ndo_get_stats		= mlx4_en_get_stats,
+-	.ndo_set_multicast_list	= mlx4_en_set_multicast,
+-	.ndo_set_mac_address	= mlx4_en_set_mac,
+-	.ndo_validate_addr	= eth_validate_addr,
+-	.ndo_change_mtu		= mlx4_en_change_mtu,
+-	.ndo_tx_timeout		= mlx4_en_tx_timeout,
+-	.ndo_vlan_rx_register	= mlx4_en_vlan_rx_register,
+-	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
+-	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+-	.ndo_poll_controller	= mlx4_en_netpoll,
+-#endif
+-};
+ 
+ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 			struct mlx4_en_port_profile *prof)
+@@ -1026,7 +1008,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 	/*
+ 	 * Initialize netdev entry points
+ 	 */
+-	dev->netdev_ops = &mlx4_netdev_ops;
++	dev->open		= mlx4_en_open;
++	dev->stop		= mlx4_en_close;
++	dev->hard_start_xmit	= mlx4_en_xmit,
++	dev->select_queue	= mlx4_en_select_queue,
++	dev->get_stats		= mlx4_en_get_stats,
++	dev->set_multicast_list	= mlx4_en_set_multicast,
++	dev->set_mac_address	= mlx4_en_set_mac,
++	dev->change_mtu		= mlx4_en_change_mtu,
++	dev->tx_timeout		= mlx4_en_tx_timeout,
++	dev->vlan_rx_register	= mlx4_en_vlan_rx_register,
++	dev->vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
++	dev->vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
++#ifdef CONFIG_NET_POLL_CONTROLLER
++	dev->poll_controller	= mlx4_en_netpoll,
++#endif
+ 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+ 
+ 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+-- 
+1.6.1.3
+
diff --git a/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch b/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch
new file mode 100644
index 0000000..eefeae0
--- /dev/null
+++ b/kernel_patches/attic/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch
@@ -0,0 +1,893 @@
+From 4f3262d88349cd4ac0cc0b8ecd458b7db4fe63e5 Mon Sep 17 00:00:00 2001
+From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+Date: Sun, 31 May 2009 14:57:40 +0300
+Subject: [PATCH] mlx4_en: use own lro implemetation
+
+Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
+---
+ drivers/net/mlx4/Makefile     |    2 +-
+ drivers/net/mlx4/en_ethtool.c |   17 --
+ drivers/net/mlx4/en_lro.c     |  540 +++++++++++++++++++++++++++++++++++++++++
+ drivers/net/mlx4/en_rx.c      |  109 +++------
+ drivers/net/mlx4/mlx4_en.h    |   52 ++++-
+ 5 files changed, 623 insertions(+), 97 deletions(-)
+ create mode 100644 drivers/net/mlx4/en_lro.c
+
+diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
+index 87c2259..ed94870 100644
+--- a/drivers/net/mlx4/Makefile
++++ b/drivers/net/mlx4/Makefile
+@@ -6,4 +6,4 @@ mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
+ obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
+ 
+ mlx4_en-y := 	en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
+-		en_resources.o en_netdev.o en_frag.o
++		en_resources.o en_netdev.o en_frag.o en_lro.o
+diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
+index 091f990..19a10f3 100644
+--- a/drivers/net/mlx4/en_ethtool.c
++++ b/drivers/net/mlx4/en_ethtool.c
+@@ -39,21 +39,6 @@
+ #include "en_port.h"
+ 
+ 
+-static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
+-{
+-	int i;
+-
+-	priv->port_stats.lro_aggregated = 0;
+-	priv->port_stats.lro_flushed = 0;
+-	priv->port_stats.lro_no_desc = 0;
+-
+-	for (i = 0; i < priv->rx_ring_num; i++) {
+-		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
+-		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
+-		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
+-	}
+-}
+-
+ static void
+ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+ {
+@@ -163,8 +148,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+ 
+ 	spin_lock_bh(&priv->stats_lock);
+ 
+-	mlx4_en_update_lro_stats(priv);
+-
+ 	for (i = 0; i < NUM_MAIN_STATS; i++)
+ 		data[index++] = ((unsigned long *) &priv->stats)[i];
+ 	for (i = 0; i < NUM_PORT_STATS; i++)
+diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
+new file mode 100644
+index 0000000..bb5563f
+--- /dev/null
++++ b/drivers/net/mlx4/en_lro.c
+@@ -0,0 +1,540 @@
++/*
++ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
++ *
++ * This software is available to you under a choice of one of two
++ * licenses.  You may choose to be licensed under the terms of the GNU
++ * General Public License (GPL) Version 2, available from the file
++ * COPYING in the main directory of this source tree, or the
++ * OpenIB.org BSD license below:
++ *
++ *     Redistribution and use in source and binary forms, with or
++ *     without modification, are permitted provided that the following
++ *     conditions are met:
++ *
++ *      - Redistributions of source code must retain the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer.
++ *
++ *      - Redistributions in binary form must reproduce the above
++ *        copyright notice, this list of conditions and the following
++ *        disclaimer in the documentation and/or other materials
++ *        provided with the distribution.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ */
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <net/tcp.h>
++#include <linux/if_vlan.h>
++#include <linux/delay.h>
++
++#include "mlx4_en.h"
++
++/* LRO hash function - using sum of source and destination port LSBs is
++ * good enough */
++#define LRO_INDEX(th, size) \
++	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
++
++/* #define CONFIG_MLX4_EN_DEBUG_LRO */
++
++#ifdef CONFIG_MLX4_EN_DEBUG_LRO
++static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
++{
++	int i;
++	int size, size2;
++	struct sk_buff *skb = lro->skb;
++	skb_frag_t *frags;
++	int len, len2;
++	int cur_skb = 0;
++
++	/* Sum fragment sizes of first skb */
++	len = skb->len;
++	size = skb_headlen(skb);
++	frags = skb_shinfo(skb)->frags;
++	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++		size += frags[i].size;
++	}
++
++	/* Add in fragments of linked skb's */
++	skb = skb_shinfo(skb)->frag_list;
++	while (skb) {
++		cur_skb++;
++		len2 = skb->len;
++		if (skb_headlen(skb)) {
++			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
++				  "in fraglist (skb:%d)\n", cur_skb);
++			return;
++		}
++
++		size2 = 0;
++		frags = skb_shinfo(skb)->frags;
++		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++			size2 += frags[i].size;
++		}
++
++		if (size2 != len2) {
++			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
++			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
++			return;
++		}
++		size += size2;
++		skb = skb->next;
++	}
++
++	if (size != len)
++		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
++}
++#endif /* MLX4_EN_DEBUG_LRO */
++
++static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
++		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
++{
++	struct sk_buff *skb = lro->skb;
++	struct iphdr *iph = (struct iphdr *) skb->data;
++	struct tcphdr *th = (struct tcphdr *)(iph + 1);
++	unsigned int headlen = skb_headlen(skb);
++	__wsum tcp_hdr_csum;
++	u32 *ts;
++
++	/* Update IP length and checksum */
++	iph->tot_len = htons(lro->tot_len);
++	iph->check = 0;
++	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
++
++	/* Update latest TCP ack, window, psh, and timestamp */
++	th->ack_seq = lro->ack_seq;
++	th->window = lro->window;
++	th->psh = !!lro->psh;
++	if (lro->has_timestamp) {
++		ts = (u32 *) (th + 1);
++		ts[1] = htonl(lro->tsval);
++		ts[2] = lro->tsecr;
++	}
++	th->check = 0;
++	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
++	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
++	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
++				      lro->tot_len - (iph->ihl << 2),
++				      IPPROTO_TCP, lro->data_csum);
++
++	/* Update skb */
++	skb->len = lro->tot_len;
++	skb->data_len = lro->tot_len - headlen;
++	skb->truesize = skb->len + sizeof(struct sk_buff);
++	skb_shinfo(skb)->gso_size = lro->mss;
++
++#ifdef CONFIG_MLX4_EN_DEBUG_LRO
++	mlx4_en_lro_validate(priv, lro);
++#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
++
++	/* Push it up the stack */
++	if (priv->vlgrp && lro->has_vlan)
++		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
++					be16_to_cpu(lro->vlan_prio));
++	else
++		netif_receive_skb(skb);
++	priv->dev->last_rx = jiffies;
++
++	/* Increment stats */
++	priv->port_stats.lro_flushed++;
++
++	/* Move session back to the free list */
++	hlist_del(&lro->node);
++	hlist_del(&lro->flush_node);
++	hlist_add_head(&lro->node, &ring->lro_free);
++}
++
++void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
++{
++	struct mlx4_en_lro *lro;
++	struct hlist_node *node, *tmp;
++
++	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
++		if (all || time_after(jiffies, lro->expires))
++			mlx4_en_lro_flush_single(priv, ring, lro);
++	}
++}
++
++static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
++				   struct mlx4_en_lro *lro,
++				   struct mlx4_en_rx_desc *rx_desc,
++				   struct skb_frag_struct *skb_frags,
++				   struct mlx4_en_rx_alloc *page_alloc,
++				   unsigned int data_len,
++				   int hlen)
++{
++	struct sk_buff *skb = lro->skb_last;
++	struct skb_shared_info *info;
++	struct skb_frag_struct *frags_copy;
++	int nr_frags;
++
++	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
++		return -ENOMEM;
++
++	info = skb_shinfo(skb);
++
++	/* Copy fragments from descriptor ring to skb */
++	frags_copy = info->frags + info->nr_frags;
++	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
++						frags_copy,
++						page_alloc,
++						data_len + hlen);
++	if (!nr_frags) {
++		en_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
++		return -ENOMEM;
++	}
++
++	/* Skip over headers */
++	frags_copy[0].page_offset += hlen;
++
++	if (nr_frags == 1)
++		frags_copy[0].size = data_len;
++	else {
++		/* Adjust size of last fragment to match packet length.
++		 * Note: if this fragment is also the first one, the
++		 *       operation is completed in the next line */
++		frags_copy[nr_frags - 1].size = hlen + data_len -
++				priv->frag_info[nr_frags - 1].frag_prefix_size;
++
++		/* Adjust size of first fragment */
++		frags_copy[0].size -= hlen;
++	}
++
++	/* Update skb bookkeeping */
++	skb->len += data_len;
++	skb->data_len += data_len;
++	info->nr_frags += nr_frags;
++	return 0;
++}
++
++static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
++						       struct mlx4_en_rx_ring *ring,
++						       struct iphdr *iph,
++						       struct tcphdr *th)
++{
++	struct mlx4_en_lro *lro;
++	struct hlist_node *node;
++	int index = LRO_INDEX(th, mdev->profile.num_lro);
++	struct hlist_head *list = &ring->lro_hash[index];
++
++	hlist_for_each_entry(lro, node, list, node) {
++		if (lro->sport_dport == *((u32*) &th->source) &&
++		    lro->saddr == iph->saddr &&
++		    lro->daddr == iph->daddr)
++			return lro;
++	}
++	return NULL;
++}
++
++static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
++							struct mlx4_en_rx_ring *ring)
++{
++	return hlist_empty(&ring->lro_free) ? NULL :
++		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
++}
++
++static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
++					struct tcphdr *th, int len)
++{
++	__wsum tcp_csum;
++	__wsum tcp_hdr_csum;
++	__wsum tcp_ps_hdr_csum;
++
++	tcp_csum = ~csum_unfold(th->check);
++	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
++
++	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
++					     len + (th->doff << 2),
++					     IPPROTO_TCP, 0);
++
++	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
++			tcp_ps_hdr_csum);
++}
++
++int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
++					  struct mlx4_en_rx_desc *rx_desc,
++					  struct skb_frag_struct *skb_frags,
++					  unsigned int length,
++					  struct mlx4_cqe *cqe)
++{
++	struct mlx4_en_dev *mdev = priv->mdev;
++	struct mlx4_en_lro *lro;
++	struct sk_buff *skb;
++	struct iphdr *iph;
++	struct tcphdr *th;
++	dma_addr_t dma;
++	int tcp_hlen;
++	int tcp_data_len;
++	int hlen;
++	u16 ip_len;
++	void *va;
++	u32 *ts;
++	u32 seq;
++	u32 tsval = (u32) ~0UL;
++	u32 tsecr = 0;
++	u32 ack_seq;
++	u16 window;
++
++	/* This packet is eligible for LRO if it is:
++	 * - DIX Ethernet (type interpretation)
++	 * - TCP/IP (v4)
++	 * - without IP options
++	 * - not an IP fragment */
++	if (!mlx4_en_can_lro(cqe->status))
++			return -1;
++
++	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
++	 * with no VLAN (HW stripped it) and no IP options */
++	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
++	iph = va + ETH_HLEN;
++	th = (struct tcphdr *)(iph + 1);
++
++	/* Synchronsize headers for processing */
++	dma = be64_to_cpu(rx_desc->data[0].addr);
++#define MAX_LRO_HEADER		(ETH_HLEN + \
++				 sizeof(*iph) + \
++				 sizeof(*th) + \
++				 TCPOLEN_TSTAMP_ALIGNED)
++	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
++				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
++
++	/* We only handle aligned timestamp options */
++	tcp_hlen = (th->doff << 2);
++	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
++		ts = (u32*) (th + 1);
++		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
++					  (TCPOPT_NOP << 16) |
++					  (TCPOPT_TIMESTAMP << 8) |
++					  TCPOLEN_TIMESTAMP)))
++			goto sync_device;
++		tsval = ntohl(ts[1]);
++		tsecr = ts[2];
++	} else if (tcp_hlen != sizeof(*th))
++		goto sync_device;
++	
++
++	/* At this point we know we have a TCP packet that is likely to be
++	 * eligible for LRO. Therefore, see now if we have an oustanding
++	 * session that corresponds to this packet so we could flush it if
++	 * something still prevents LRO */
++	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
++
++	/* ensure no bits set besides ack or psh */
++	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
++	    th->cwr || !th->ack) {
++		if (lro) {
++			/* First flush session to keep packets in-order */
++			mlx4_en_lro_flush_single(priv, ring, lro);
++		}
++		goto sync_device;
++	}
++
++	/* Get ip length and verify that the frame is big enough */
++	ip_len = ntohs(iph->tot_len);
++	if (unlikely(length < ETH_HLEN + ip_len)) {
++		en_warn(priv, "Cannot LRO - ip payload exceeds frame!\n");
++		goto sync_device;
++	}
++
++	/* Get TCP payload length */
++	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
++	seq = ntohl(th->seq);
++	if (!tcp_data_len)
++		goto flush_session;
++
++	if (lro) {
++		/* Check VLAN tag */
++		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
++			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
++				mlx4_en_lro_flush_single(priv, ring, lro);
++				goto sync_device;
++			}
++		} else if (lro->has_vlan) {
++			mlx4_en_lro_flush_single(priv, ring, lro);
++			goto sync_device;
++		}
++
++		/* Check sequence number */
++		if (unlikely(seq != lro->next_seq)) {
++			mlx4_en_lro_flush_single(priv, ring, lro);
++			goto sync_device;
++		}
++
++		/* If the cummulative IP length is over 64K, flush and start
++		 * a new session */
++		if (lro->tot_len + tcp_data_len > 0xffff) {
++			mlx4_en_lro_flush_single(priv, ring, lro);
++			goto new_session;
++		}
++
++		/* Check timestamps */
++		if (tcp_hlen != sizeof(*th)) {
++			if (unlikely(lro->tsval > tsval || !tsecr))
++				goto sync_device;
++		}
++
++		window = th->window;
++		ack_seq = th->ack_seq;
++		if (likely(tcp_data_len)) {
++			/* Append the data! */
++			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
++			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
++							ring->page_alloc,
++							tcp_data_len, hlen)) {
++				mlx4_en_lro_flush_single(priv, ring, lro);
++				goto sync_device;
++			}
++		} else {
++			/* No data */
++			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
++							 0, MAX_LRO_HEADER,
++							 DMA_FROM_DEVICE);
++		}
++
++		/* Update session */
++		lro->psh |= th->psh;
++		lro->next_seq += tcp_data_len;
++		lro->data_csum = csum_block_add(lro->data_csum,
++					mlx4_en_lro_tcp_data_csum(iph, th,
++								  tcp_data_len),
++					lro->tot_len);
++		lro->tot_len += tcp_data_len;
++		lro->tsval = tsval;
++		lro->tsecr = tsecr;
++		lro->ack_seq = ack_seq;
++		lro->window = window;
++		if (tcp_data_len > lro->mss)
++			lro->mss = tcp_data_len;
++		priv->port_stats.lro_aggregated++;
++		if (th->psh)
++			mlx4_en_lro_flush_single(priv, ring, lro);
++		return 0;
++	}
++
++new_session:
++	if (th->psh)
++		goto sync_device;
++	lro = mlx4_en_lro_alloc_session(priv, ring);
++	if (lro) {
++		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
++							     ETH_HLEN + ip_len);
++		if (skb) {
++			int index;
++
++			/* Add in the skb */
++			lro->skb = skb;
++			lro->skb_last = skb;
++			skb->protocol = eth_type_trans(skb, priv->dev);
++			skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++			/* Initialize session */
++			lro->saddr = iph->saddr;
++			lro->daddr = iph->daddr;
++			lro->sport_dport = *((u32*) &th->source);
++
++			lro->next_seq = seq + tcp_data_len;
++			lro->tot_len = ip_len;
++			lro->psh = th->psh;
++			lro->ack_seq = th->ack_seq;
++			lro->window = th->window;
++			lro->mss = tcp_data_len;
++			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
++						tcp_data_len);
++
++			/* Handle vlans */
++			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
++				lro->vlan_prio = cqe->sl_vid;
++				lro->has_vlan = 1;
++			} else
++				lro->has_vlan = 0;
++
++			/* Handle timestamps */
++			if (tcp_hlen != sizeof(*th)) {
++				lro->tsval = tsval;
++				lro->tsecr = tsecr;
++				lro->has_timestamp = 1;
++			} else {
++				lro->tsval = (u32) ~0UL;
++				lro->has_timestamp = 0;
++			}
++
++			/* Activate this session */
++			lro->expires = jiffies + HZ / 25;
++			hlist_del(&lro->node);
++			index = LRO_INDEX(th, mdev->profile.num_lro);
++
++			hlist_add_head(&lro->node, &ring->lro_hash[index]);
++			hlist_add_head(&lro->flush_node, &ring->lro_flush);
++			priv->port_stats.lro_aggregated++;
++			return 0;
++		} else {
++			/* Packet is dropped because we were not able to allocate new
++			 * page for fragments */
++			dma_sync_single_range_for_device(&mdev->pdev->dev, dma,
++							 0, MAX_LRO_HEADER,
++							 DMA_FROM_DEVICE);
++			return 0;
++		}
++	} else {
++		priv->port_stats.lro_no_desc++;
++	}
++
++flush_session:
++	if (lro)
++		mlx4_en_lro_flush_single(priv, ring, lro);
++sync_device:
++	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
++					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
++	return -1;
++}
++
++void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
++{
++	struct mlx4_en_lro *lro;
++	struct hlist_node *node, *tmp;
++
++	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
++		hlist_del(&lro->node);
++		kfree(lro);
++	}
++	kfree(ring->lro_hash);
++}
++
++int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
++{
++	struct mlx4_en_lro *lro;
++	int i;
++
++	INIT_HLIST_HEAD(&ring->lro_free);
++	INIT_HLIST_HEAD(&ring->lro_flush);
++	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
++				 GFP_KERNEL);
++	if (!ring->lro_hash)
++		return -ENOMEM;
++
++	for (i = 0; i < num_lro; i++) {
++		INIT_HLIST_HEAD(&ring->lro_hash[i]);
++		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
++		if (!lro) {
++			mlx4_en_lro_destroy(ring);
++			return -ENOMEM;
++		}
++		INIT_HLIST_NODE(&lro->node);
++		INIT_HLIST_NODE(&lro->flush_node);
++		hlist_add_head(&lro->node, &ring->lro_free);
++	}
++	return 0;
++}
++
++
+diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
+index a4b1203..6bc6113 100644
+--- a/drivers/net/mlx4/en_rx.c
++++ b/drivers/net/mlx4/en_rx.c
+@@ -51,18 +51,6 @@ static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
+ 	return;
+ }
+ 
+-static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
+-				   void **ip_hdr, void **tcpudp_hdr,
+-				   u64 *hdr_flags, void *priv)
+-{
+-	*mac_hdr = page_address(frags->page) + frags->page_offset;
+-	*ip_hdr = *mac_hdr + ETH_HLEN;
+-	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
+-	*hdr_flags = LRO_IPV4 | LRO_TCP;
+-
+-	return 0;
+-}
+-
+ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ 			      struct mlx4_en_rx_desc *rx_desc,
+ 			      struct skb_frag_struct *skb_frags,
+@@ -455,23 +443,14 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ 	}
+ 	ring->buf = ring->wqres.buf.direct.buf;
+ 
+-	/* Configure lro mngr */
+-	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
+-	ring->lro.dev = priv->dev;
+-	ring->lro.features = LRO_F_NAPI;
+-	ring->lro.frag_align_pad = NET_IP_ALIGN;
+-	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
+-	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+-	ring->lro.max_desc = mdev->profile.num_lro;
+-	ring->lro.max_aggr = MAX_SKB_FRAGS;
+-	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
+-				    sizeof(struct net_lro_desc),
+-				    GFP_KERNEL);
+-	if (!ring->lro.lro_arr) {
+-		en_err(priv, "Failed to allocate lro array\n");
+-		goto err_map;
++	/* Allocate LRO sessions */
++	if (mdev->profile.num_lro) {
++		err =  mlx4_en_lro_init(ring, mdev->profile.num_lro);
++		if (err) {
++			en_err(priv, "Failed allocating lro sessions\n");
++			goto err_map;
++		}
+ 	}
+-	ring->lro.get_frag_header = mlx4_en_get_frag_header;
+ 
+ 	return 0;
+ 
+@@ -588,7 +567,8 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ {
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 
+-	kfree(ring->lro.lro_arr);
++	if (mdev->profile.num_lro)
++		mlx4_en_lro_destroy(ring);
+ 	mlx4_en_unmap_buffer(&ring->wqres.buf);
+ 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ 	vfree(ring->rx_info);
+@@ -608,12 +588,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ 
+ 
+ /* Unmap a completed descriptor and free unused pages */
+-static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+-				    struct mlx4_en_rx_desc *rx_desc,
+-				    struct skb_frag_struct *skb_frags,
+-				    struct skb_frag_struct *skb_frags_rx,
+-				    struct mlx4_en_rx_alloc *page_alloc,
+-				    int length)
++int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
++			     struct mlx4_en_rx_desc *rx_desc,
++			     struct skb_frag_struct *skb_frags,
++			     struct skb_frag_struct *skb_frags_rx,
++			     struct mlx4_en_rx_alloc *page_alloc,
++			     int length)
+ {
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 	struct mlx4_en_frag_info *frag_info;
+@@ -656,11 +636,11 @@ fail:
+ }
+ 
+ 
+-static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+-				      struct mlx4_en_rx_desc *rx_desc,
+-				      struct skb_frag_struct *skb_frags,
+-				      struct mlx4_en_rx_alloc *page_alloc,
+-				      unsigned int length)
++struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
++			       struct mlx4_en_rx_desc *rx_desc,
++			       struct skb_frag_struct *skb_frags,
++			       struct mlx4_en_rx_alloc *page_alloc,
++			       unsigned int length)
+ {
+ 	struct mlx4_en_dev *mdev = priv->mdev;
+ 	struct sk_buff *skb;
+@@ -901,14 +881,13 @@ out:
+ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
++	struct mlx4_en_dev *mdev = priv->mdev;
+ 	struct mlx4_cqe *cqe;
+ 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ 	struct skb_frag_struct *skb_frags;
+-	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
+ 	struct mlx4_en_rx_desc *rx_desc;
+ 	struct sk_buff *skb;
+ 	int index;
+-	int nr;
+ 	unsigned int length;
+ 	int polled = 0;
+ 	int ip_summed;
+@@ -946,40 +925,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 
+ 		if (likely(priv->rx_csum)) {
+ 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+-			    (cqe->checksum == cpu_to_be16(0xffff))) {
++			    (cqe->checksum == 0xffff)) {
+ 				priv->port_stats.rx_chksum_good++;
+-				/* This packet is eligible for LRO if it is:
+-				 * - DIX Ethernet (type interpretation)
+-				 * - TCP/IP (v4)
+-				 * - without IP options
+-				 * - not an IP fragment */
+-				if (mlx4_en_can_lro(cqe->status) &&
+-				    dev->features & NETIF_F_LRO) {
+-
+-					nr = mlx4_en_complete_rx_desc(
+-						priv, rx_desc,
+-						skb_frags, lro_frags,
+-						ring->page_alloc, length);
+-					if (!nr)
+-						goto next;
+-
+-					if (priv->vlgrp && (cqe->vlan_my_qpn &
+-							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
+-						lro_vlan_hwaccel_receive_frags(
+-						       &ring->lro, lro_frags,
+-						       length, length,
+-						       priv->vlgrp,
+-						       be16_to_cpu(cqe->sl_vid),
+-						       NULL, 0);
+-					} else
+-						lro_receive_frags(&ring->lro,
+-								  lro_frags,
+-								  length,
+-								  length,
+-								  NULL, 0);
+-
++				if (mdev->profile.num_lro &&
++				    !mlx4_en_lro_rx(priv, ring, rx_desc,
++						    skb_frags, length, cqe))
+ 					goto next;
+-				}
+ 
+ 				/* LRO not possible, complete processing here */
+ 				ip_summed = CHECKSUM_UNNECESSARY;
+@@ -1002,7 +953,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 
+ 		skb->ip_summed = ip_summed;
+ 		skb->protocol = eth_type_trans(skb, dev);
+-		skb_record_rx_queue(skb, cq->ring);
+ 
+ 		/* Push it up the stack */
+ 		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
+@@ -1012,6 +962,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 		} else
+ 			netif_receive_skb(skb);
+ 
++		dev->last_rx = jiffies;
++
+ next:
+ 		++cq->mcq.cons_index;
+ 		index = (cq->mcq.cons_index) & ring->size_mask;
+@@ -1019,13 +971,15 @@ next:
+ 		if (++polled == budget) {
+ 			/* We are here because we reached the NAPI budget -
+ 			 * flush only pending LRO sessions */
+-			lro_flush_all(&ring->lro);
++			if (mdev->profile.num_lro)
++				mlx4_en_lro_flush(priv, ring, 0);
+ 			goto out;
+ 		}
+ 	}
+ 
+ 	/* If CQ is empty flush all LRO sessions unconditionally */
+-	lro_flush_all(&ring->lro);
++	if (mdev->profile.num_lro)
++		mlx4_en_lro_flush(priv, ring, 1);
+ 
+ out:
+ 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+@@ -1042,7 +996,6 @@ out:
+ 	return polled;
+ }
+ 
+-
+ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+ {
+ 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
+index b45774c..e9174c4 100644
+--- a/drivers/net/mlx4/mlx4_en.h
++++ b/drivers/net/mlx4/mlx4_en.h
+@@ -296,11 +296,41 @@ struct mlx4_en_rx_desc {
+ 	struct mlx4_wqe_data_seg data[0];
+ };
+ 
++struct mlx4_en_lro {
++	struct hlist_node node;
++	struct hlist_node flush_node;
++
++	/* Id fields come first: */
++	u32 saddr;
++	u32 daddr;
++	u32 sport_dport;
++	u32 next_seq;
++	u16 tot_len;
++	u8 psh;
++
++	u32 tsval;
++	u32 tsecr;
++	u32 ack_seq;
++	u16 window;
++	__be16 vlan_prio;
++	u16 has_vlan;
++	u16 has_timestamp;
++	u16 mss;
++	__wsum  data_csum;
++
++	unsigned long expires;
++	struct sk_buff *skb;
++	struct sk_buff *skb_last;
++};
++
+ struct mlx4_en_rx_ring {
+ 	struct mlx4_srq srq;
+ 	struct mlx4_hwq_resources wqres;
+ 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+-	struct net_lro_mgr lro;
++	struct mlx4_en_lro lro;
++	struct hlist_head *lro_hash;
++	struct hlist_head lro_free;
++	struct hlist_head lro_flush;
+ 	u32 size ;	/* number of Rx descs*/
+ 	u32 actual_size;
+ 	u32 size_mask;
+@@ -592,12 +622,32 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
+ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+ 				 struct mlx4_en_rss_map *rss_map,
+ 				 int num_entries, int num_rings);
++
++void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
++int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
++		   struct mlx4_en_rx_desc *rx_desc,
++		   struct skb_frag_struct *skb_frags,
++		   unsigned int length, struct mlx4_cqe *cqe);
++void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
++int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
++
+ void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
+ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+ void mlx4_en_rx_refill(struct work_struct *work);
+ void mlx4_en_rx_irq(struct mlx4_cq *mcq);
++struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
++			       struct mlx4_en_rx_desc *rx_desc,
++			       struct skb_frag_struct *skb_frags,
++			       struct mlx4_en_rx_alloc *page_alloc,
++			       unsigned int length);
++int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
++			     struct mlx4_en_rx_desc *rx_desc,
++			     struct skb_frag_struct *skb_frags,
++			     struct skb_frag_struct *skb_frags_rx,
++			     struct mlx4_en_rx_alloc *page_alloc,
++			     int length);
+ 
+ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
+-- 
+1.6.0
+
diff --git a/kernel_patches/attic/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch b/kernel_patches/attic/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch
new file mode 100644
index 0000000..59385b0
--- /dev/null
+++ b/kernel_patches/attic/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch
@@ -0,0 +1,92 @@
+diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
+index 7a38c47..51801e0 100644
+--- a/drivers/infiniband/ulp/sdp/sdp_main.c
++++ b/drivers/infiniband/ulp/sdp/sdp_main.c
+@@ -580,7 +580,7 @@ adjudge_to_death:
+ 		/* TODO: tcp_fin_time to get timeout */
+ 		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
+ 			atomic_read(&sk->sk_refcnt));
+-		percpu_counter_inc(sk->sk_prot->orphan_count);
++		atomic_inc(sk->sk_prot->orphan_count);
+ 	}
+ 
+ 	/* TODO: limit number of orphaned sockets.
+@@ -861,7 +861,7 @@ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk)
+ 		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
+ 	}
+ 
+-	percpu_counter_dec(ssk->isk.sk.sk_prot->orphan_count);
++	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
+ }
+ 
+ void sdp_destroy_work(struct work_struct *work)
+@@ -902,7 +902,7 @@ void sdp_dreq_wait_timeout_work(struct work_struct *work)
+ 	sdp_sk(sk)->dreq_wait_timeout = 0;
+ 
+ 	if (sk->sk_state == TCP_FIN_WAIT1)
+-		percpu_counter_dec(ssk->isk.sk.sk_prot->orphan_count);
++		atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
+ 
+ 	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
+ 
+@@ -2162,9 +2162,9 @@ void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)
+ 		sk->sk_data_ready(sk, 0);
+ }
+ 
+-static struct percpu_counter *sockets_allocated;
++static atomic_t sockets_allocated;
+ static atomic_t memory_allocated;
+-static struct percpu_counter *orphan_count;
++static atomic_t orphan_count;
+ static int memory_pressure;
+ struct proto sdp_proto = {
+         .close       = sdp_close,
+@@ -2182,8 +2182,10 @@ struct proto sdp_proto = {
+         .get_port    = sdp_get_port,
+ 	/* Wish we had this: .listen   = sdp_listen */
+ 	.enter_memory_pressure = sdp_enter_memory_pressure,
++	.sockets_allocated = &sockets_allocated,
+ 	.memory_allocated = &memory_allocated,
+ 	.memory_pressure = &memory_pressure,
++	.orphan_count = &orphan_count,
+         .sysctl_mem             = sysctl_tcp_mem,
+         .sysctl_wmem            = sysctl_tcp_wmem,
+         .sysctl_rmem            = sysctl_tcp_rmem,
+@@ -2538,15 +2540,6 @@ static int __init sdp_init(void)
+ 	spin_lock_init(&sock_list_lock);
+ 	spin_lock_init(&sdp_large_sockets_lock);
+ 
+-	sockets_allocated = kmalloc(sizeof(*sockets_allocated), GFP_KERNEL);
+-	orphan_count = kmalloc(sizeof(*orphan_count), GFP_KERNEL);
+-	percpu_counter_init(sockets_allocated, 0);
+-	percpu_counter_init(orphan_count, 0);
+-
+-	sdp_proto.sockets_allocated = sockets_allocated;
+-	sdp_proto.orphan_count = orphan_count;
+-
+-
+ 	sdp_workqueue = create_singlethread_workqueue("sdp");
+ 	if (!sdp_workqueue) {
+ 		return -ENOMEM;
+@@ -2581,9 +2574,9 @@ static void __exit sdp_exit(void)
+ 	sock_unregister(PF_INET_SDP);
+ 	proto_unregister(&sdp_proto);
+ 
+-	if (percpu_counter_read_positive(orphan_count))
+-		printk(KERN_WARNING "%s: orphan_count %lld\n", __func__,
+-		       percpu_counter_read_positive(orphan_count));
++	if (atomic_read(&orphan_count))
++		printk(KERN_WARNING "%s: orphan_count %d\n", __func__,
++		       atomic_read(&orphan_count));
+ 	destroy_workqueue(sdp_workqueue);
+ 	flush_scheduled_work();
+ 
+@@ -2596,8 +2589,6 @@ static void __exit sdp_exit(void)
+ 	sdp_proc_unregister();
+ 
+ 	ib_unregister_client(&sdp_client);
+-	kfree(orphan_count);
+-	kfree(sockets_allocated);
+ }
+ 
+ module_init(sdp_init);
diff --git a/kernel_patches/attic/backport/2.6.27_sles11/to_sles11.patch b/kernel_patches/attic/backport/2.6.27_sles11/to_sles11.patch
new file mode 100644
index 0000000..6b50549
--- /dev/null
+++ b/kernel_patches/attic/backport/2.6.27_sles11/to_sles11.patch
@@ -0,0 +1,493 @@
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 299e075..5349778 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
+ 		scsi_queue_work(conn->session->host, &conn->xmitwork);
+ }
+ 
+-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
++static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ {
+ 	struct iscsi_cls_session *cls_session;
+ 	struct iscsi_session *session;
+ 	struct iscsi_conn *conn;
+-	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
++	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+ 
+ 	cls_session = starget_to_session(scsi_target(scmd->device));
+ 	session = cls_session->dd_data;
+@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ 		 * We are probably in the middle of iscsi recovery so let
+ 		 * that complete and handle the error.
+ 		 */
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
+ 	}
+ 
+ 	conn = session->leadconn;
+ 	if (!conn) {
+ 		/* In the middle of shuting down */
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 		goto done;
+ 	}
+ 
+@@ -1513,20 +1513,20 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+ 	 */
+ 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+ 			    (conn->ping_timeout * HZ), jiffies))
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 	/*
+ 	 * if we are about to check the transport then give the command
+ 	 * more time
+ 	 */
+ 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
+ 			   jiffies))
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ 	/* if in the middle of checking the transport then give us more time */
+ 	if (conn->ping_task)
+-		rc = EH_RESET_TIMER;
++		rc = BLK_EH_RESET_TIMER;
+ done:
+ 	spin_unlock(&session->lock);
+-	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
++	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "nh");
+ 	return rc;
+ }
+ 
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 7846065..30541f0 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
+ 		file->f_path.dentry->d_name.name,
+ 		mapping->host->i_ino, len, (long long) pos);
+ 
+-	page = __grab_cache_page(mapping, index);
++	page = grab_cache_page_write_begin(mapping, index, flags);
+ 	if (!page)
+ 		return -ENOMEM;
+ 	*pagep = page;
+diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
+index 108f47e..2389a2e 100644
+--- a/include/linux/nfsd/nfsd.h
++++ b/include/linux/nfsd/nfsd.h
+@@ -85,7 +85,8 @@ __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
+ #ifdef CONFIG_NFSD_V4
+ __be32          nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+                     struct nfs4_acl *);
+-int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
++int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, 
++		struct vfsmount *mnt, struct nfs4_acl **);
+ #endif /* CONFIG_NFSD_V4 */
+ __be32		nfsd_create(struct svc_rqst *, struct svc_fh *,
+ 				char *name, int len, struct iattr *attrs,
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 18060be..715ff2a 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -388,7 +388,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 	err = nfserr_notsync;
+ 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+ 		fh_lock(fhp);
+-		host_err = notify_change(dentry, iap);
++		host_err = notify_change(dentry, fhp->fh_export->ex_path.mnt, iap);
+ 		err = nfserrno(host_err);
+ 		fh_unlock(fhp);
+ 	}
+@@ -408,11 +408,12 @@ out_nfserr:
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+     defined(CONFIG_NFSD_V3_ACL) || \
+     defined(CONFIG_NFSD_V4)
+-static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
++static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++			     char *key, void **buf)
+ {
+ 	ssize_t buflen;
+ 
+-	buflen = vfs_getxattr(dentry, key, NULL, 0);
++	buflen = vfs_getxattr(dentry, mnt, key, NULL, 0, NULL);
+ 	if (buflen <= 0)
+ 		return buflen;
+ 
+@@ -420,13 +421,14 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
+ 	if (!*buf)
+ 		return -ENOMEM;
+ 
+-	return vfs_getxattr(dentry, key, *buf, buflen);
++	return vfs_getxattr(dentry, mnt, key, *buf, buflen, NULL);
+ }
+ #endif
+ 
+ #if defined(CONFIG_NFSD_V4)
+ static int
+-set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
++set_nfsv4_acl_one(struct dentry *dentry, struct vfsmount *mnt,
++		  struct posix_acl *pacl, char *key)
+ {
+ 	int len;
+ 	size_t buflen;
+@@ -445,7 +447,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
+ 		goto out;
+ 	}
+ 
+-	error = vfs_setxattr(dentry, key, buf, len, 0);
++	error = vfs_setxattr(dentry, mnt, key, buf, len, 0, NULL);
+ out:
+ 	kfree(buf);
+ 	return error;
+@@ -458,6 +460,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	__be32 error;
+ 	int host_error;
+ 	struct dentry *dentry;
++	struct vfsmount *mnt;
+ 	struct inode *inode;
+ 	struct posix_acl *pacl = NULL, *dpacl = NULL;
+ 	unsigned int flags = 0;
+@@ -468,6 +471,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		return error;
+ 
+ 	dentry = fhp->fh_dentry;
++	mnt = fhp->fh_export->ex_path.mnt;
+ 	inode = dentry->d_inode;
+ 	if (S_ISDIR(inode->i_mode))
+ 		flags = NFS4_ACL_DIR;
+@@ -478,12 +482,14 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	} else if (host_error < 0)
+ 		goto out_nfserr;
+ 
+-	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
++	host_error = set_nfsv4_acl_one(dentry, mnt, pacl,
++				       POSIX_ACL_XATTR_ACCESS);
+ 	if (host_error < 0)
+ 		goto out_release;
+ 
+ 	if (S_ISDIR(inode->i_mode))
+-		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
++		host_error = set_nfsv4_acl_one(dentry, mnt, dpacl,
++					       POSIX_ACL_XATTR_DEFAULT);
+ 
+ out_release:
+ 	posix_acl_release(pacl);
+@@ -496,13 +502,13 @@ out_nfserr:
+ }
+ 
+ static struct posix_acl *
+-_get_posix_acl(struct dentry *dentry, char *key)
++_get_posix_acl(struct dentry *dentry, struct vfsmount *mnt, char *key)
+ {
+ 	void *buf = NULL;
+ 	struct posix_acl *pacl = NULL;
+ 	int buflen;
+ 
+-	buflen = nfsd_getxattr(dentry, key, &buf);
++	buflen = nfsd_getxattr(dentry, mnt, key, &buf);
+ 	if (!buflen)
+ 		buflen = -ENODATA;
+ 	if (buflen <= 0)
+@@ -514,14 +520,15 @@ _get_posix_acl(struct dentry *dentry, char *key)
+ }
+ 
+ int
+-nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
++nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
++		   struct vfsmount *mnt, struct nfs4_acl **acl)
+ {
+ 	struct inode *inode = dentry->d_inode;
+ 	int error = 0;
+ 	struct posix_acl *pacl = NULL, *dpacl = NULL;
+ 	unsigned int flags = 0;
+ 
+-	pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
++	pacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_ACCESS);
+ 	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
+ 		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ 	if (IS_ERR(pacl)) {
+@@ -531,7 +538,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
+ 	}
+ 
+ 	if (S_ISDIR(inode->i_mode)) {
+-		dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
++		dpacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_DEFAULT);
+ 		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
+ 			dpacl = NULL;
+ 		else if (IS_ERR(dpacl)) {
+@@ -944,13 +951,13 @@ out:
+ 	return err;
+ }
+ 
+-static void kill_suid(struct dentry *dentry)
++static void kill_suid(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ 	struct iattr	ia;
+ 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ 
+ 	mutex_lock(&dentry->d_inode->i_mutex);
+-	notify_change(dentry, &ia);
++	notify_change(dentry, mnt, &ia);
+ 	mutex_unlock(&dentry->d_inode->i_mutex);
+ }
+ 
+@@ -1009,7 +1016,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+ 
+ 	/* clear setuid/setgid flag after write */
+ 	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+-		kill_suid(dentry);
++		kill_suid(dentry, exp->ex_path.mnt);
+ 
+ 	if (host_err >= 0 && stable) {
+ 		static ino_t	last_ino;
+@@ -1187,6 +1194,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		int type, dev_t rdev, struct svc_fh *resfhp)
+ {
+ 	struct dentry	*dentry, *dchild = NULL;
++	struct svc_export *exp;
+ 	struct inode	*dirp;
+ 	__be32		err;
+ 	__be32		err2;
+@@ -1204,6 +1212,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		goto out;
+ 
+ 	dentry = fhp->fh_dentry;
++	exp = fhp->fh_export;
+ 	dirp = dentry->d_inode;
+ 
+ 	err = nfserr_notdir;
+@@ -1220,7 +1229,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		host_err = PTR_ERR(dchild);
+ 		if (IS_ERR(dchild))
+ 			goto out_nfserr;
+-		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
++		err = fh_compose(resfhp, exp, dchild, fhp);
+ 		if (err)
+ 			goto out;
+ 	} else {
+@@ -1270,13 +1279,14 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+ 		break;
+ 	case S_IFDIR:
+-		host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
++		host_err = vfs_mkdir(dirp, dchild, exp->ex_path.mnt, iap->ia_mode);
+ 		break;
+ 	case S_IFCHR:
+ 	case S_IFBLK:
+ 	case S_IFIFO:
+ 	case S_IFSOCK:
+-		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
++		host_err = vfs_mknod(dirp, dchild, exp->ex_path.mnt,
++				     iap->ia_mode, rdev);
+ 		break;
+ 	}
+ 	if (host_err < 0) {
+@@ -1284,7 +1294,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		goto out_nfserr;
+ 	}
+ 
+-	if (EX_ISSYNC(fhp->fh_export)) {
++	if (EX_ISSYNC(exp)) {
+ 		err = nfserrno(nfsd_sync_dir(dentry));
+ 		write_inode_now(dchild->d_inode, 1);
+ 	}
+@@ -1514,6 +1524,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 				struct iattr *iap)
+ {
+ 	struct dentry	*dentry, *dnew;
++	struct svc_export *exp;
+ 	__be32		err, cerr;
+ 	int		host_err;
+ 
+@@ -1538,6 +1549,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_err)
+ 		goto out_nfserr;
+ 
++	exp = fhp->fh_export;
+ 	if (unlikely(path[plen] != 0)) {
+ 		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
+ 		if (path_alloced == NULL)
+@@ -1545,14 +1557,16 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 		else {
+ 			strncpy(path_alloced, path, plen);
+ 			path_alloced[plen] = 0;
+-			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
++			host_err = vfs_symlink(dentry->d_inode, dnew,
++					       exp->ex_path.mnt, path_alloced);
+ 			kfree(path_alloced);
+ 		}
+ 	} else
+-		host_err = vfs_symlink(dentry->d_inode, dnew, path);
++		host_err = vfs_symlink(dentry->d_inode, dnew, exp->ex_path.mnt,
++				       path);
+ 
+ 	if (!host_err) {
+-		if (EX_ISSYNC(fhp->fh_export))
++		if (EX_ISSYNC(exp))
+ 			host_err = nfsd_sync_dir(dentry);
+ 	}
+ 	err = nfserrno(host_err);
+@@ -1560,7 +1574,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 
+ 	mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ 
+-	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
++	cerr = fh_compose(resfhp, exp, dnew, fhp);
+ 	dput(dnew);
+ 	if (err==0) err = cerr;
+ out:
+@@ -1615,7 +1629,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
+ 		err = nfserrno(host_err);
+ 		goto out_dput;
+ 	}
+-	host_err = vfs_link(dold, dirp, dnew);
++	host_err = vfs_link(dold, tfhp->fh_export->ex_path.mnt, dirp,
++			    dnew, ffhp->fh_export->ex_path.mnt);
+ 	if (!host_err) {
+ 		if (EX_ISSYNC(ffhp->fh_export)) {
+ 			err = nfserrno(nfsd_sync_dir(ddir));
+@@ -1716,7 +1731,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
+ 	if (host_err)
+ 		goto out_dput_new;
+ 
+-	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
++	host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt,
++			      tdir, ndentry, tfhp->fh_export->ex_path.mnt);
+ 	if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
+ 		host_err = nfsd_sync_dir(tdentry);
+ 		if (!host_err)
+@@ -1754,6 +1770,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 				char *fname, int flen)
+ {
+ 	struct dentry	*dentry, *rdentry;
++	struct svc_export *exp;
+ 	struct inode	*dirp;
+ 	__be32		err;
+ 	int		host_err;
+@@ -1768,6 +1785,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 	fh_lock_nested(fhp, I_MUTEX_PARENT);
+ 	dentry = fhp->fh_dentry;
+ 	dirp = dentry->d_inode;
++	exp = fhp->fh_export;
+ 
+ 	rdentry = lookup_one_len(fname, dentry, flen);
+ 	host_err = PTR_ERR(rdentry);
+@@ -1789,21 +1807,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
+ 
+ 	if (type != S_IFDIR) { /* It's UNLINK */
+ #ifdef MSNFS
+-		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
++		if ((exp->ex_flags & NFSEXP_MSNFS) &&
+ 			(atomic_read(&rdentry->d_count) > 1)) {
+ 			host_err = -EPERM;
+ 		} else
+ #endif
+-		host_err = vfs_unlink(dirp, rdentry);
++		host_err = vfs_unlink(dirp, rdentry, exp->ex_path.mnt);
+ 	} else { /* It's RMDIR */
+-		host_err = vfs_rmdir(dirp, rdentry);
++		host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
+ 	}
+ 
+ 	dput(rdentry);
+ 
+ 	if (host_err)
+ 		goto out_drop;
+-	if (EX_ISSYNC(fhp->fh_export))
++	if (EX_ISSYNC(exp))
+ 		host_err = nfsd_sync_dir(dentry);
+ 
+ out_drop:
+@@ -2036,7 +2054,8 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
+ 		return ERR_PTR(-EOPNOTSUPP);
+ 	}
+ 
+-	size = nfsd_getxattr(fhp->fh_dentry, name, &value);
++	size = nfsd_getxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name,
++			     &value);
+ 	if (size < 0)
+ 		return ERR_PTR(size);
+ 
+@@ -2048,6 +2067,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
+ int
+ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ {
++	struct vfsmount *mnt;
+ 	struct inode *inode = fhp->fh_dentry->d_inode;
+ 	char *name;
+ 	void *value = NULL;
+@@ -2080,21 +2100,24 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ 	} else
+ 		size = 0;
+ 
+-	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
++	mnt = fhp->fh_export->ex_path.mnt;
++	error = mnt_want_write(mnt);
+ 	if (error)
+ 		goto getout;
+ 	if (size)
+-		error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
++		error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size, 0,
++				     NULL);
+ 	else {
+ 		if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
+ 			error = 0;
+ 		else {
+-			error = vfs_removexattr(fhp->fh_dentry, name);
++			error = vfs_removexattr(fhp->fh_dentry, mnt, name,
++						NULL);
+ 			if (error == -ENODATA)
+ 				error = 0;
+ 		}
+ 	}
+-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
++	mnt_drop_write(mnt);
+ 
+ getout:
+ 	kfree(value);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 14ba4d9..4fc3121 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
+ 	}
+ 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
+ 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
+-		err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
++		err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt, &acl);
+ 		aclsupport = (err == 0);
+ 		if (bmval0 & FATTR4_WORD0_ACL) {
+ 			if (err == -EOPNOTSUPP)
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 145b3c8..2ca394f 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -158,7 +158,8 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
+ 	status = mnt_want_write(rec_dir.path.mnt);
+ 	if (status)
+ 		goto out_put;
+-	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
++	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry,
++			   rec_dir.path.mnt, S_IRWXU);
+ 	mnt_drop_write(rec_dir.path.mnt);
+ out_put:
+ 	dput(dentry);
+@@ -263,7 +264,7 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
+ 		return -EINVAL;
+ 	}
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_unlink(dir->d_inode, dentry);
++	status = vfs_unlink(dir->d_inode, dentry, rec_dir.path.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
+ }
+@@ -278,7 +279,7 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
+ 	 * a kernel from the future.... */
+ 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
+ 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-	status = vfs_rmdir(dir->d_inode, dentry);
++	status = vfs_rmdir(dir->d_inode, dentry, rec_dir.path.mnt);
+ 	mutex_unlock(&dir->d_inode->i_mutex);
+ 	return status;
+ }
diff --git a/kernel_patches/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch b/kernel_patches/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch
deleted file mode 100644
index 4d3b269..0000000
--- a/kernel_patches/backport/2.6.27_sles11/mlx4_en_0010_do_not_use_netdev_ops.patch
+++ /dev/null
@@ -1,65 +0,0 @@
-From 34a43622ec035aa41a5383c31245838472784c1b Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Sun, 31 May 2009 11:59:25 +0300
-Subject: [PATCH 1/8] mlx4_en: Don't use netdev_ops
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/en_netdev.c |   34 +++++++++++++++-------------------
- 1 files changed, 15 insertions(+), 19 deletions(-)
-
-diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
-index a38adf5..4ad5f3c 100644
---- a/drivers/net/mlx4/en_netdev.c
-+++ b/drivers/net/mlx4/en_netdev.c
-@@ -933,24 +933,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
- 	return 0;
- }
- 
--static const struct net_device_ops mlx4_netdev_ops = {
--	.ndo_open		= mlx4_en_open,
--	.ndo_stop		= mlx4_en_close,
--	.ndo_start_xmit		= mlx4_en_xmit,
--	.ndo_select_queue	= mlx4_en_select_queue,
--	.ndo_get_stats		= mlx4_en_get_stats,
--	.ndo_set_multicast_list	= mlx4_en_set_multicast,
--	.ndo_set_mac_address	= mlx4_en_set_mac,
--	.ndo_validate_addr	= eth_validate_addr,
--	.ndo_change_mtu		= mlx4_en_change_mtu,
--	.ndo_tx_timeout		= mlx4_en_tx_timeout,
--	.ndo_vlan_rx_register	= mlx4_en_vlan_rx_register,
--	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
--	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
--#ifdef CONFIG_NET_POLL_CONTROLLER
--	.ndo_poll_controller	= mlx4_en_netpoll,
--#endif
--};
- 
- int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- 			struct mlx4_en_port_profile *prof)
-@@ -1026,7 +1008,21 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
- 	/*
- 	 * Initialize netdev entry points
- 	 */
--	dev->netdev_ops = &mlx4_netdev_ops;
-+	dev->open		= mlx4_en_open;
-+	dev->stop		= mlx4_en_close;
-+	dev->hard_start_xmit	= mlx4_en_xmit,
-+	dev->select_queue	= mlx4_en_select_queue,
-+	dev->get_stats		= mlx4_en_get_stats,
-+	dev->set_multicast_list	= mlx4_en_set_multicast,
-+	dev->set_mac_address	= mlx4_en_set_mac,
-+	dev->change_mtu		= mlx4_en_change_mtu,
-+	dev->tx_timeout		= mlx4_en_tx_timeout,
-+	dev->vlan_rx_register	= mlx4_en_vlan_rx_register,
-+	dev->vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
-+	dev->vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
-+#ifdef CONFIG_NET_POLL_CONTROLLER
-+	dev->poll_controller	= mlx4_en_netpoll,
-+#endif
- 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
- 
- 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
--- 
-1.6.1.3
-
diff --git a/kernel_patches/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch b/kernel_patches/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch
deleted file mode 100644
index eefeae0..0000000
--- a/kernel_patches/backport/2.6.27_sles11/mlx4_en_0030_lro_backport.patch
+++ /dev/null
@@ -1,893 +0,0 @@
-From 4f3262d88349cd4ac0cc0b8ecd458b7db4fe63e5 Mon Sep 17 00:00:00 2001
-From: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
-Date: Sun, 31 May 2009 14:57:40 +0300
-Subject: [PATCH] mlx4_en: use own lro implemetation
-
-Signed-off-by: Yevgeny Petrilin <yevgenyp at mellanox.co.il>
----
- drivers/net/mlx4/Makefile     |    2 +-
- drivers/net/mlx4/en_ethtool.c |   17 --
- drivers/net/mlx4/en_lro.c     |  540 +++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx4/en_rx.c      |  109 +++------
- drivers/net/mlx4/mlx4_en.h    |   52 ++++-
- 5 files changed, 623 insertions(+), 97 deletions(-)
- create mode 100644 drivers/net/mlx4/en_lro.c
-
-diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
-index 87c2259..ed94870 100644
---- a/drivers/net/mlx4/Makefile
-+++ b/drivers/net/mlx4/Makefile
-@@ -6,4 +6,4 @@ mlx4_core-y :=	alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- obj-$(CONFIG_MLX4_EN)               += mlx4_en.o
- 
- mlx4_en-y := 	en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
--		en_resources.o en_netdev.o en_frag.o
-+		en_resources.o en_netdev.o en_frag.o en_lro.o
-diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
-index 091f990..19a10f3 100644
---- a/drivers/net/mlx4/en_ethtool.c
-+++ b/drivers/net/mlx4/en_ethtool.c
-@@ -39,21 +39,6 @@
- #include "en_port.h"
- 
- 
--static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
--{
--	int i;
--
--	priv->port_stats.lro_aggregated = 0;
--	priv->port_stats.lro_flushed = 0;
--	priv->port_stats.lro_no_desc = 0;
--
--	for (i = 0; i < priv->rx_ring_num; i++) {
--		priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
--		priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
--		priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
--	}
--}
--
- static void
- mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
- {
-@@ -163,8 +148,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
- 
- 	spin_lock_bh(&priv->stats_lock);
- 
--	mlx4_en_update_lro_stats(priv);
--
- 	for (i = 0; i < NUM_MAIN_STATS; i++)
- 		data[index++] = ((unsigned long *) &priv->stats)[i];
- 	for (i = 0; i < NUM_PORT_STATS; i++)
-diff --git a/drivers/net/mlx4/en_lro.c b/drivers/net/mlx4/en_lro.c
-new file mode 100644
-index 0000000..bb5563f
---- /dev/null
-+++ b/drivers/net/mlx4/en_lro.c
-@@ -0,0 +1,540 @@
-+/*
-+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
-+ *
-+ * This software is available to you under a choice of one of two
-+ * licenses.  You may choose to be licensed under the terms of the GNU
-+ * General Public License (GPL) Version 2, available from the file
-+ * COPYING in the main directory of this source tree, or the
-+ * OpenIB.org BSD license below:
-+ *
-+ *     Redistribution and use in source and binary forms, with or
-+ *     without modification, are permitted provided that the following
-+ *     conditions are met:
-+ *
-+ *      - Redistributions of source code must retain the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer.
-+ *
-+ *      - Redistributions in binary form must reproduce the above
-+ *        copyright notice, this list of conditions and the following
-+ *        disclaimer in the documentation and/or other materials
-+ *        provided with the distribution.
-+ *
-+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-+ * SOFTWARE.
-+ *
-+ */
-+
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <net/tcp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/delay.h>
-+
-+#include "mlx4_en.h"
-+
-+/* LRO hash function - using sum of source and destination port LSBs is
-+ * good enough */
-+#define LRO_INDEX(th, size) \
-+	((*((u8*) &th->source + 1) + *((u8*) &th->dest + 1)) & (size - 1))
-+
-+/* #define CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+static void mlx4_en_lro_validate(struct mlx4_en_priv* priv, struct mlx4_en_lro *lro)
-+{
-+	int i;
-+	int size, size2;
-+	struct sk_buff *skb = lro->skb;
-+	skb_frag_t *frags;
-+	int len, len2;
-+	int cur_skb = 0;
-+
-+	/* Sum fragment sizes of first skb */
-+	len = skb->len;
-+	size = skb_headlen(skb);
-+	frags = skb_shinfo(skb)->frags;
-+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+		size += frags[i].size;
-+	}
-+
-+	/* Add in fragments of linked skb's */
-+	skb = skb_shinfo(skb)->frag_list;
-+	while (skb) {
-+		cur_skb++;
-+		len2 = skb->len;
-+		if (skb_headlen(skb)) {
-+			mlx4_err(priv->mdev, "Bad LRO format: non-zero headlen "
-+				  "in fraglist (skb:%d)\n", cur_skb);
-+			return;
-+		}
-+
-+		size2 = 0;
-+		frags = skb_shinfo(skb)->frags;
-+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-+			size2 += frags[i].size;
-+		}
-+
-+		if (size2 != len2) {
-+			mlx4_err(priv->mdev, "Bad skb size:%d in LRO fraglist. "
-+			          "Expected:%d (skb:%d)\n", size2, len2, cur_skb);
-+			return;
-+		}
-+		size += size2;
-+		skb = skb->next;
-+	}
-+
-+	if (size != len)
-+		mlx4_err(priv->mdev, "Bad LRO size:%d expected:%d\n", size, len);
-+}
-+#endif /* MLX4_EN_DEBUG_LRO */
-+
-+static void mlx4_en_lro_flush_single(struct mlx4_en_priv* priv,
-+		   struct mlx4_en_rx_ring* ring, struct mlx4_en_lro *lro)
-+{
-+	struct sk_buff *skb = lro->skb;
-+	struct iphdr *iph = (struct iphdr *) skb->data;
-+	struct tcphdr *th = (struct tcphdr *)(iph + 1);
-+	unsigned int headlen = skb_headlen(skb);
-+	__wsum tcp_hdr_csum;
-+	u32 *ts;
-+
-+	/* Update IP length and checksum */
-+	iph->tot_len = htons(lro->tot_len);
-+	iph->check = 0;
-+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-+
-+	/* Update latest TCP ack, window, psh, and timestamp */
-+	th->ack_seq = lro->ack_seq;
-+	th->window = lro->window;
-+	th->psh = !!lro->psh;
-+	if (lro->has_timestamp) {
-+		ts = (u32 *) (th + 1);
-+		ts[1] = htonl(lro->tsval);
-+		ts[2] = lro->tsecr;
-+	}
-+	th->check = 0;
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, 0);
-+	lro->data_csum = csum_add(lro->data_csum, tcp_hdr_csum);
-+	th->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
-+				      lro->tot_len - (iph->ihl << 2),
-+				      IPPROTO_TCP, lro->data_csum);
-+
-+	/* Update skb */
-+	skb->len = lro->tot_len;
-+	skb->data_len = lro->tot_len - headlen;
-+	skb->truesize = skb->len + sizeof(struct sk_buff);
-+	skb_shinfo(skb)->gso_size = lro->mss;
-+
-+#ifdef CONFIG_MLX4_EN_DEBUG_LRO
-+	mlx4_en_lro_validate(priv, lro);
-+#endif /* CONFIG_MLX4_EN_DEBUG_LRO */
-+
-+	/* Push it up the stack */
-+	if (priv->vlgrp && lro->has_vlan)
-+		vlan_hwaccel_receive_skb(skb, priv->vlgrp,
-+					be16_to_cpu(lro->vlan_prio));
-+	else
-+		netif_receive_skb(skb);
-+	priv->dev->last_rx = jiffies;
-+
-+	/* Increment stats */
-+	priv->port_stats.lro_flushed++;
-+
-+	/* Move session back to the free list */
-+	hlist_del(&lro->node);
-+	hlist_del(&lro->flush_node);
-+	hlist_add_head(&lro->node, &ring->lro_free);
-+}
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_flush, flush_node) {
-+		if (all || time_after(jiffies, lro->expires))
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+	}
-+}
-+
-+static inline int mlx4_en_lro_append(struct mlx4_en_priv *priv,
-+				   struct mlx4_en_lro *lro,
-+				   struct mlx4_en_rx_desc *rx_desc,
-+				   struct skb_frag_struct *skb_frags,
-+				   struct mlx4_en_rx_alloc *page_alloc,
-+				   unsigned int data_len,
-+				   int hlen)
-+{
-+	struct sk_buff *skb = lro->skb_last;
-+	struct skb_shared_info *info;
-+	struct skb_frag_struct *frags_copy;
-+	int nr_frags;
-+
-+	if (skb_shinfo(skb)->nr_frags + priv->num_frags > MAX_SKB_FRAGS)
-+		return -ENOMEM;
-+
-+	info = skb_shinfo(skb);
-+
-+	/* Copy fragments from descriptor ring to skb */
-+	frags_copy = info->frags + info->nr_frags;
-+	nr_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
-+						frags_copy,
-+						page_alloc,
-+						data_len + hlen);
-+	if (!nr_frags) {
-+		en_dbg(DRV, priv, "Failed completing rx desc during LRO append\n");
-+		return -ENOMEM;
-+	}
-+
-+	/* Skip over headers */
-+	frags_copy[0].page_offset += hlen;
-+
-+	if (nr_frags == 1)
-+		frags_copy[0].size = data_len;
-+	else {
-+		/* Adjust size of last fragment to match packet length.
-+		 * Note: if this fragment is also the first one, the
-+		 *       operation is completed in the next line */
-+		frags_copy[nr_frags - 1].size = hlen + data_len -
-+				priv->frag_info[nr_frags - 1].frag_prefix_size;
-+
-+		/* Adjust size of first fragment */
-+		frags_copy[0].size -= hlen;
-+	}
-+
-+	/* Update skb bookkeeping */
-+	skb->len += data_len;
-+	skb->data_len += data_len;
-+	info->nr_frags += nr_frags;
-+	return 0;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_find_session(struct mlx4_en_dev *mdev,
-+						       struct mlx4_en_rx_ring *ring,
-+						       struct iphdr *iph,
-+						       struct tcphdr *th)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node;
-+	int index = LRO_INDEX(th, mdev->profile.num_lro);
-+	struct hlist_head *list = &ring->lro_hash[index];
-+
-+	hlist_for_each_entry(lro, node, list, node) {
-+		if (lro->sport_dport == *((u32*) &th->source) &&
-+		    lro->saddr == iph->saddr &&
-+		    lro->daddr == iph->daddr)
-+			return lro;
-+	}
-+	return NULL;
-+}
-+
-+static inline struct mlx4_en_lro *mlx4_en_lro_alloc_session(struct mlx4_en_priv *priv,
-+							struct mlx4_en_rx_ring *ring)
-+{
-+	return hlist_empty(&ring->lro_free) ? NULL :
-+		hlist_entry(ring->lro_free.first, struct mlx4_en_lro, node);
-+}
-+
-+static __wsum mlx4_en_lro_tcp_data_csum(struct iphdr *iph,
-+					struct tcphdr *th, int len)
-+{
-+	__wsum tcp_csum;
-+	__wsum tcp_hdr_csum;
-+	__wsum tcp_ps_hdr_csum;
-+
-+	tcp_csum = ~csum_unfold(th->check);
-+	tcp_hdr_csum = csum_partial((u8 *)th, th->doff << 2, tcp_csum);
-+
-+	tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-+					     len + (th->doff << 2),
-+					     IPPROTO_TCP, 0);
-+
-+	return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
-+			tcp_ps_hdr_csum);
-+}
-+
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+					  struct mlx4_en_rx_desc *rx_desc,
-+					  struct skb_frag_struct *skb_frags,
-+					  unsigned int length,
-+					  struct mlx4_cqe *cqe)
-+{
-+	struct mlx4_en_dev *mdev = priv->mdev;
-+	struct mlx4_en_lro *lro;
-+	struct sk_buff *skb;
-+	struct iphdr *iph;
-+	struct tcphdr *th;
-+	dma_addr_t dma;
-+	int tcp_hlen;
-+	int tcp_data_len;
-+	int hlen;
-+	u16 ip_len;
-+	void *va;
-+	u32 *ts;
-+	u32 seq;
-+	u32 tsval = (u32) ~0UL;
-+	u32 tsecr = 0;
-+	u32 ack_seq;
-+	u16 window;
-+
-+	/* This packet is eligible for LRO if it is:
-+	 * - DIX Ethernet (type interpretation)
-+	 * - TCP/IP (v4)
-+	 * - without IP options
-+	 * - not an IP fragment */
-+	if (!mlx4_en_can_lro(cqe->status))
-+			return -1;
-+
-+	/* Get pointer to TCP header. We already know that the packet is DIX Ethernet/IPv4/TCP
-+	 * with no VLAN (HW stripped it) and no IP options */
-+	va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
-+	iph = va + ETH_HLEN;
-+	th = (struct tcphdr *)(iph + 1);
-+
-+	/* Synchronsize headers for processing */
-+	dma = be64_to_cpu(rx_desc->data[0].addr);
-+#define MAX_LRO_HEADER		(ETH_HLEN + \
-+				 sizeof(*iph) + \
-+				 sizeof(*th) + \
-+				 TCPOLEN_TSTAMP_ALIGNED)
-+	dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
-+				      MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+
-+	/* We only handle aligned timestamp options */
-+	tcp_hlen = (th->doff << 2);
-+	if (tcp_hlen == sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) {
-+		ts = (u32*) (th + 1);
-+		if (unlikely(*ts != htonl((TCPOPT_NOP << 24) |
-+					  (TCPOPT_NOP << 16) |
-+					  (TCPOPT_TIMESTAMP << 8) |
-+					  TCPOLEN_TIMESTAMP)))
-+			goto sync_device;
-+		tsval = ntohl(ts[1]);
-+		tsecr = ts[2];
-+	} else if (tcp_hlen != sizeof(*th))
-+		goto sync_device;
-+	
-+
-+	/* At this point we know we have a TCP packet that is likely to be
-+	 * eligible for LRO. Therefore, see now if we have an oustanding
-+	 * session that corresponds to this packet so we could flush it if
-+	 * something still prevents LRO */
-+	lro = mlx4_en_lro_find_session(mdev, ring, iph, th);
-+
-+	/* ensure no bits set besides ack or psh */
-+	if (th->fin || th->syn || th->rst || th->urg || th->ece ||
-+	    th->cwr || !th->ack) {
-+		if (lro) {
-+			/* First flush session to keep packets in-order */
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		}
-+		goto sync_device;
-+	}
-+
-+	/* Get ip length and verify that the frame is big enough */
-+	ip_len = ntohs(iph->tot_len);
-+	if (unlikely(length < ETH_HLEN + ip_len)) {
-+		en_warn(priv, "Cannot LRO - ip payload exceeds frame!\n");
-+		goto sync_device;
-+	}
-+
-+	/* Get TCP payload length */
-+	tcp_data_len = ip_len - tcp_hlen - sizeof(struct iphdr);
-+	seq = ntohl(th->seq);
-+	if (!tcp_data_len)
-+		goto flush_session;
-+
-+	if (lro) {
-+		/* Check VLAN tag */
-+		if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+			if (cqe->sl_vid != lro->vlan_prio || !lro->has_vlan) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else if (lro->has_vlan) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* Check sequence number */
-+		if (unlikely(seq != lro->next_seq)) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto sync_device;
-+		}
-+
-+		/* If the cummulative IP length is over 64K, flush and start
-+		 * a new session */
-+		if (lro->tot_len + tcp_data_len > 0xffff) {
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+			goto new_session;
-+		}
-+
-+		/* Check timestamps */
-+		if (tcp_hlen != sizeof(*th)) {
-+			if (unlikely(lro->tsval > tsval || !tsecr))
-+				goto sync_device;
-+		}
-+
-+		window = th->window;
-+		ack_seq = th->ack_seq;
-+		if (likely(tcp_data_len)) {
-+			/* Append the data! */
-+			hlen = ETH_HLEN + sizeof(struct iphdr) + tcp_hlen;
-+			if (mlx4_en_lro_append(priv, lro, rx_desc, skb_frags,
-+							ring->page_alloc,
-+							tcp_data_len, hlen)) {
-+				mlx4_en_lro_flush_single(priv, ring, lro);
-+				goto sync_device;
-+			}
-+		} else {
-+			/* No data */
-+			dma_sync_single_range_for_device(&mdev->dev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+		}
-+
-+		/* Update session */
-+		lro->psh |= th->psh;
-+		lro->next_seq += tcp_data_len;
-+		lro->data_csum = csum_block_add(lro->data_csum,
-+					mlx4_en_lro_tcp_data_csum(iph, th,
-+								  tcp_data_len),
-+					lro->tot_len);
-+		lro->tot_len += tcp_data_len;
-+		lro->tsval = tsval;
-+		lro->tsecr = tsecr;
-+		lro->ack_seq = ack_seq;
-+		lro->window = window;
-+		if (tcp_data_len > lro->mss)
-+			lro->mss = tcp_data_len;
-+		priv->port_stats.lro_aggregated++;
-+		if (th->psh)
-+			mlx4_en_lro_flush_single(priv, ring, lro);
-+		return 0;
-+	}
-+
-+new_session:
-+	if (th->psh)
-+		goto sync_device;
-+	lro = mlx4_en_lro_alloc_session(priv, ring);
-+	if (lro) {
-+		skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags, ring->page_alloc,
-+							     ETH_HLEN + ip_len);
-+		if (skb) {
-+			int index;
-+
-+			/* Add in the skb */
-+			lro->skb = skb;
-+			lro->skb_last = skb;
-+			skb->protocol = eth_type_trans(skb, priv->dev);
-+			skb->ip_summed = CHECKSUM_UNNECESSARY;
-+
-+			/* Initialize session */
-+			lro->saddr = iph->saddr;
-+			lro->daddr = iph->daddr;
-+			lro->sport_dport = *((u32*) &th->source);
-+
-+			lro->next_seq = seq + tcp_data_len;
-+			lro->tot_len = ip_len;
-+			lro->psh = th->psh;
-+			lro->ack_seq = th->ack_seq;
-+			lro->window = th->window;
-+			lro->mss = tcp_data_len;
-+			lro->data_csum = mlx4_en_lro_tcp_data_csum(iph, th,
-+						tcp_data_len);
-+
-+			/* Handle vlans */
-+			if (cqe->vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK) {
-+				lro->vlan_prio = cqe->sl_vid;
-+				lro->has_vlan = 1;
-+			} else
-+				lro->has_vlan = 0;
-+
-+			/* Handle timestamps */
-+			if (tcp_hlen != sizeof(*th)) {
-+				lro->tsval = tsval;
-+				lro->tsecr = tsecr;
-+				lro->has_timestamp = 1;
-+			} else {
-+				lro->tsval = (u32) ~0UL;
-+				lro->has_timestamp = 0;
-+			}
-+
-+			/* Activate this session */
-+			lro->expires = jiffies + HZ / 25;
-+			hlist_del(&lro->node);
-+			index = LRO_INDEX(th, mdev->profile.num_lro);
-+
-+			hlist_add_head(&lro->node, &ring->lro_hash[index]);
-+			hlist_add_head(&lro->flush_node, &ring->lro_flush);
-+			priv->port_stats.lro_aggregated++;
-+			return 0;
-+		} else {
-+			/* Packet is dropped because we were not able to allocate new
-+			 * page for fragments */
-+			dma_sync_single_range_for_device(&mdev->pdev->dev, dma,
-+							 0, MAX_LRO_HEADER,
-+							 DMA_FROM_DEVICE);
-+			return 0;
-+		}
-+	} else {
-+		priv->port_stats.lro_no_desc++;
-+	}
-+
-+flush_session:
-+	if (lro)
-+		mlx4_en_lro_flush_single(priv, ring, lro);
-+sync_device:
-+	dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
-+					 MAX_LRO_HEADER, DMA_FROM_DEVICE);
-+	return -1;
-+}
-+
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring)
-+{
-+	struct mlx4_en_lro *lro;
-+	struct hlist_node *node, *tmp;
-+
-+	hlist_for_each_entry_safe(lro, node, tmp, &ring->lro_free, node) {
-+		hlist_del(&lro->node);
-+		kfree(lro);
-+	}
-+	kfree(ring->lro_hash);
-+}
-+
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro)
-+{
-+	struct mlx4_en_lro *lro;
-+	int i;
-+
-+	INIT_HLIST_HEAD(&ring->lro_free);
-+	INIT_HLIST_HEAD(&ring->lro_flush);
-+	ring->lro_hash = kmalloc(sizeof(struct hlist_head) * num_lro,
-+				 GFP_KERNEL);
-+	if (!ring->lro_hash)
-+		return -ENOMEM;
-+
-+	for (i = 0; i < num_lro; i++) {
-+		INIT_HLIST_HEAD(&ring->lro_hash[i]);
-+		lro = kzalloc(sizeof(struct mlx4_en_lro), GFP_KERNEL);
-+		if (!lro) {
-+			mlx4_en_lro_destroy(ring);
-+			return -ENOMEM;
-+		}
-+		INIT_HLIST_NODE(&lro->node);
-+		INIT_HLIST_NODE(&lro->flush_node);
-+		hlist_add_head(&lro->node, &ring->lro_free);
-+	}
-+	return 0;
-+}
-+
-+
-diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
-index a4b1203..6bc6113 100644
---- a/drivers/net/mlx4/en_rx.c
-+++ b/drivers/net/mlx4/en_rx.c
-@@ -51,18 +51,6 @@ static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
- 	return;
- }
- 
--static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
--				   void **ip_hdr, void **tcpudp_hdr,
--				   u64 *hdr_flags, void *priv)
--{
--	*mac_hdr = page_address(frags->page) + frags->page_offset;
--	*ip_hdr = *mac_hdr + ETH_HLEN;
--	*tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
--	*hdr_flags = LRO_IPV4 | LRO_TCP;
--
--	return 0;
--}
--
- static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_rx_desc *rx_desc,
- 			      struct skb_frag_struct *skb_frags,
-@@ -455,23 +443,14 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
- 	}
- 	ring->buf = ring->wqres.buf.direct.buf;
- 
--	/* Configure lro mngr */
--	memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
--	ring->lro.dev = priv->dev;
--	ring->lro.features = LRO_F_NAPI;
--	ring->lro.frag_align_pad = NET_IP_ALIGN;
--	ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
--	ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
--	ring->lro.max_desc = mdev->profile.num_lro;
--	ring->lro.max_aggr = MAX_SKB_FRAGS;
--	ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
--				    sizeof(struct net_lro_desc),
--				    GFP_KERNEL);
--	if (!ring->lro.lro_arr) {
--		en_err(priv, "Failed to allocate lro array\n");
--		goto err_map;
-+	/* Allocate LRO sessions */
-+	if (mdev->profile.num_lro) {
-+		err =  mlx4_en_lro_init(ring, mdev->profile.num_lro);
-+		if (err) {
-+			en_err(priv, "Failed allocating lro sessions\n");
-+			goto err_map;
-+		}
- 	}
--	ring->lro.get_frag_header = mlx4_en_get_frag_header;
- 
- 	return 0;
- 
-@@ -588,7 +567,8 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 
--	kfree(ring->lro.lro_arr);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_destroy(ring);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	vfree(ring->rx_info);
-@@ -608,12 +588,12 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
- 
- 
- /* Unmap a completed descriptor and free unused pages */
--static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
--				    struct mlx4_en_rx_desc *rx_desc,
--				    struct skb_frag_struct *skb_frags,
--				    struct skb_frag_struct *skb_frags_rx,
--				    struct mlx4_en_rx_alloc *page_alloc,
--				    int length)
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_en_frag_info *frag_info;
-@@ -656,11 +636,11 @@ fail:
- }
- 
- 
--static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
--				      struct mlx4_en_rx_desc *rx_desc,
--				      struct skb_frag_struct *skb_frags,
--				      struct mlx4_en_rx_alloc *page_alloc,
--				      unsigned int length)
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length)
- {
- 	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct sk_buff *skb;
-@@ -901,14 +881,13 @@ out:
- int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
- {
- 	struct mlx4_en_priv *priv = netdev_priv(dev);
-+	struct mlx4_en_dev *mdev = priv->mdev;
- 	struct mlx4_cqe *cqe;
- 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- 	struct skb_frag_struct *skb_frags;
--	struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
- 	struct mlx4_en_rx_desc *rx_desc;
- 	struct sk_buff *skb;
- 	int index;
--	int nr;
- 	unsigned int length;
- 	int polled = 0;
- 	int ip_summed;
-@@ -946,40 +925,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 
- 		if (likely(priv->rx_csum)) {
- 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
--			    (cqe->checksum == cpu_to_be16(0xffff))) {
-+			    (cqe->checksum == 0xffff)) {
- 				priv->port_stats.rx_chksum_good++;
--				/* This packet is eligible for LRO if it is:
--				 * - DIX Ethernet (type interpretation)
--				 * - TCP/IP (v4)
--				 * - without IP options
--				 * - not an IP fragment */
--				if (mlx4_en_can_lro(cqe->status) &&
--				    dev->features & NETIF_F_LRO) {
--
--					nr = mlx4_en_complete_rx_desc(
--						priv, rx_desc,
--						skb_frags, lro_frags,
--						ring->page_alloc, length);
--					if (!nr)
--						goto next;
--
--					if (priv->vlgrp && (cqe->vlan_my_qpn &
--							    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
--						lro_vlan_hwaccel_receive_frags(
--						       &ring->lro, lro_frags,
--						       length, length,
--						       priv->vlgrp,
--						       be16_to_cpu(cqe->sl_vid),
--						       NULL, 0);
--					} else
--						lro_receive_frags(&ring->lro,
--								  lro_frags,
--								  length,
--								  length,
--								  NULL, 0);
--
-+				if (mdev->profile.num_lro &&
-+				    !mlx4_en_lro_rx(priv, ring, rx_desc,
-+						    skb_frags, length, cqe))
- 					goto next;
--				}
- 
- 				/* LRO not possible, complete processing here */
- 				ip_summed = CHECKSUM_UNNECESSARY;
-@@ -1002,7 +953,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 
- 		skb->ip_summed = ip_summed;
- 		skb->protocol = eth_type_trans(skb, dev);
--		skb_record_rx_queue(skb, cq->ring);
- 
- 		/* Push it up the stack */
- 		if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
-@@ -1012,6 +962,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 		} else
- 			netif_receive_skb(skb);
- 
-+		dev->last_rx = jiffies;
-+
- next:
- 		++cq->mcq.cons_index;
- 		index = (cq->mcq.cons_index) & ring->size_mask;
-@@ -1019,13 +971,15 @@ next:
- 		if (++polled == budget) {
- 			/* We are here because we reached the NAPI budget -
- 			 * flush only pending LRO sessions */
--			lro_flush_all(&ring->lro);
-+			if (mdev->profile.num_lro)
-+				mlx4_en_lro_flush(priv, ring, 0);
- 			goto out;
- 		}
- 	}
- 
- 	/* If CQ is empty flush all LRO sessions unconditionally */
--	lro_flush_all(&ring->lro);
-+	if (mdev->profile.num_lro)
-+		mlx4_en_lro_flush(priv, ring, 1);
- 
- out:
- 	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
-@@ -1042,7 +996,6 @@ out:
- 	return polled;
- }
- 
--
- void mlx4_en_rx_irq(struct mlx4_cq *mcq)
- {
- 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
-diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
-index b45774c..e9174c4 100644
---- a/drivers/net/mlx4/mlx4_en.h
-+++ b/drivers/net/mlx4/mlx4_en.h
-@@ -296,11 +296,41 @@ struct mlx4_en_rx_desc {
- 	struct mlx4_wqe_data_seg data[0];
- };
- 
-+struct mlx4_en_lro {
-+	struct hlist_node node;
-+	struct hlist_node flush_node;
-+
-+	/* Id fields come first: */
-+	u32 saddr;
-+	u32 daddr;
-+	u32 sport_dport;
-+	u32 next_seq;
-+	u16 tot_len;
-+	u8 psh;
-+
-+	u32 tsval;
-+	u32 tsecr;
-+	u32 ack_seq;
-+	u16 window;
-+	__be16 vlan_prio;
-+	u16 has_vlan;
-+	u16 has_timestamp;
-+	u16 mss;
-+	__wsum  data_csum;
-+
-+	unsigned long expires;
-+	struct sk_buff *skb;
-+	struct sk_buff *skb_last;
-+};
-+
- struct mlx4_en_rx_ring {
- 	struct mlx4_srq srq;
- 	struct mlx4_hwq_resources wqres;
- 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
--	struct net_lro_mgr lro;
-+	struct mlx4_en_lro lro;
-+	struct hlist_head *lro_hash;
-+	struct hlist_head lro_free;
-+	struct hlist_head lro_flush;
- 	u32 size ;	/* number of Rx descs*/
- 	u32 actual_size;
- 	u32 size_mask;
-@@ -592,12 +622,32 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
- void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
- 				 struct mlx4_en_rss_map *rss_map,
- 				 int num_entries, int num_rings);
-+
-+void mlx4_en_lro_flush(struct mlx4_en_priv* priv, struct mlx4_en_rx_ring *ring, u8 all);
-+int mlx4_en_lro_rx(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring,
-+		   struct mlx4_en_rx_desc *rx_desc,
-+		   struct skb_frag_struct *skb_frags,
-+		   unsigned int length, struct mlx4_cqe *cqe);
-+void mlx4_en_lro_destroy(struct mlx4_en_rx_ring *ring);
-+int mlx4_en_lro_init(struct mlx4_en_rx_ring *ring, int num_lro);
-+
- void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
- int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
- void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
- int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
- void mlx4_en_rx_refill(struct work_struct *work);
- void mlx4_en_rx_irq(struct mlx4_cq *mcq);
-+struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
-+			       struct mlx4_en_rx_desc *rx_desc,
-+			       struct skb_frag_struct *skb_frags,
-+			       struct mlx4_en_rx_alloc *page_alloc,
-+			       unsigned int length);
-+int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
-+			     struct mlx4_en_rx_desc *rx_desc,
-+			     struct skb_frag_struct *skb_frags,
-+			     struct skb_frag_struct *skb_frags_rx,
-+			     struct mlx4_en_rx_alloc *page_alloc,
-+			     int length);
- 
- int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
- int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
--- 
-1.6.0
-
diff --git a/kernel_patches/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch b/kernel_patches/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch
deleted file mode 100644
index 59385b0..0000000
--- a/kernel_patches/backport/2.6.27_sles11/sdp_0080_revert_to_2_6_28.patch
+++ /dev/null
@@ -1,92 +0,0 @@
-diff --git a/drivers/infiniband/ulp/sdp/sdp_main.c b/drivers/infiniband/ulp/sdp/sdp_main.c
-index 7a38c47..51801e0 100644
---- a/drivers/infiniband/ulp/sdp/sdp_main.c
-+++ b/drivers/infiniband/ulp/sdp/sdp_main.c
-@@ -580,7 +580,7 @@ adjudge_to_death:
- 		/* TODO: tcp_fin_time to get timeout */
- 		sdp_dbg(sk, "%s: entering time wait refcnt %d\n", __func__,
- 			atomic_read(&sk->sk_refcnt));
--		percpu_counter_inc(sk->sk_prot->orphan_count);
-+		atomic_inc(sk->sk_prot->orphan_count);
- 	}
- 
- 	/* TODO: limit number of orphaned sockets.
-@@ -861,7 +861,7 @@ void sdp_cancel_dreq_wait_timeout(struct sdp_sock *ssk)
- 		sock_put(&ssk->isk.sk, SOCK_REF_DREQ_TO);
- 	}
- 
--	percpu_counter_dec(ssk->isk.sk.sk_prot->orphan_count);
-+	atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
- }
- 
- void sdp_destroy_work(struct work_struct *work)
-@@ -902,7 +902,7 @@ void sdp_dreq_wait_timeout_work(struct work_struct *work)
- 	sdp_sk(sk)->dreq_wait_timeout = 0;
- 
- 	if (sk->sk_state == TCP_FIN_WAIT1)
--		percpu_counter_dec(ssk->isk.sk.sk_prot->orphan_count);
-+		atomic_dec(ssk->isk.sk.sk_prot->orphan_count);
- 
- 	sdp_exch_state(sk, TCPF_LAST_ACK | TCPF_FIN_WAIT1, TCP_TIME_WAIT);
- 
-@@ -2162,9 +2162,9 @@ void sdp_urg(struct sdp_sock *ssk, struct sk_buff *skb)
- 		sk->sk_data_ready(sk, 0);
- }
- 
--static struct percpu_counter *sockets_allocated;
-+static atomic_t sockets_allocated;
- static atomic_t memory_allocated;
--static struct percpu_counter *orphan_count;
-+static atomic_t orphan_count;
- static int memory_pressure;
- struct proto sdp_proto = {
-         .close       = sdp_close,
-@@ -2182,8 +2182,10 @@ struct proto sdp_proto = {
-         .get_port    = sdp_get_port,
- 	/* Wish we had this: .listen   = sdp_listen */
- 	.enter_memory_pressure = sdp_enter_memory_pressure,
-+	.sockets_allocated = &sockets_allocated,
- 	.memory_allocated = &memory_allocated,
- 	.memory_pressure = &memory_pressure,
-+	.orphan_count = &orphan_count,
-         .sysctl_mem             = sysctl_tcp_mem,
-         .sysctl_wmem            = sysctl_tcp_wmem,
-         .sysctl_rmem            = sysctl_tcp_rmem,
-@@ -2538,15 +2540,6 @@ static int __init sdp_init(void)
- 	spin_lock_init(&sock_list_lock);
- 	spin_lock_init(&sdp_large_sockets_lock);
- 
--	sockets_allocated = kmalloc(sizeof(*sockets_allocated), GFP_KERNEL);
--	orphan_count = kmalloc(sizeof(*orphan_count), GFP_KERNEL);
--	percpu_counter_init(sockets_allocated, 0);
--	percpu_counter_init(orphan_count, 0);
--
--	sdp_proto.sockets_allocated = sockets_allocated;
--	sdp_proto.orphan_count = orphan_count;
--
--
- 	sdp_workqueue = create_singlethread_workqueue("sdp");
- 	if (!sdp_workqueue) {
- 		return -ENOMEM;
-@@ -2581,9 +2574,9 @@ static void __exit sdp_exit(void)
- 	sock_unregister(PF_INET_SDP);
- 	proto_unregister(&sdp_proto);
- 
--	if (percpu_counter_read_positive(orphan_count))
--		printk(KERN_WARNING "%s: orphan_count %lld\n", __func__,
--		       percpu_counter_read_positive(orphan_count));
-+	if (atomic_read(&orphan_count))
-+		printk(KERN_WARNING "%s: orphan_count %d\n", __func__,
-+		       atomic_read(&orphan_count));
- 	destroy_workqueue(sdp_workqueue);
- 	flush_scheduled_work();
- 
-@@ -2596,8 +2589,6 @@ static void __exit sdp_exit(void)
- 	sdp_proc_unregister();
- 
- 	ib_unregister_client(&sdp_client);
--	kfree(orphan_count);
--	kfree(sockets_allocated);
- }
- 
- module_init(sdp_init);
diff --git a/kernel_patches/backport/2.6.27_sles11/to_sles11.patch b/kernel_patches/backport/2.6.27_sles11/to_sles11.patch
deleted file mode 100644
index 6b50549..0000000
--- a/kernel_patches/backport/2.6.27_sles11/to_sles11.patch
+++ /dev/null
@@ -1,493 +0,0 @@
-diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
-index 299e075..5349778 100644
---- a/drivers/scsi/libiscsi.c
-+++ b/drivers/scsi/libiscsi.c
-@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
- 		scsi_queue_work(conn->session->host, &conn->xmitwork);
- }
- 
--static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
-+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
- {
- 	struct iscsi_cls_session *cls_session;
- 	struct iscsi_session *session;
- 	struct iscsi_conn *conn;
--	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
-+	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
- 
- 	cls_session = starget_to_session(scsi_target(scmd->device));
- 	session = cls_session->dd_data;
-@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
- 		 * We are probably in the middle of iscsi recovery so let
- 		 * that complete and handle the error.
- 		 */
--		rc = EH_RESET_TIMER;
-+		rc = BLK_EH_RESET_TIMER;
- 		goto done;
- 	}
- 
- 	conn = session->leadconn;
- 	if (!conn) {
- 		/* In the middle of shuting down */
--		rc = EH_RESET_TIMER;
-+		rc = BLK_EH_RESET_TIMER;
- 		goto done;
- 	}
- 
-@@ -1513,20 +1513,20 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
- 	 */
- 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
- 			    (conn->ping_timeout * HZ), jiffies))
--		rc = EH_RESET_TIMER;
-+		rc = BLK_EH_RESET_TIMER;
- 	/*
- 	 * if we are about to check the transport then give the command
- 	 * more time
- 	 */
- 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
- 			   jiffies))
--		rc = EH_RESET_TIMER;
-+		rc = BLK_EH_RESET_TIMER;
- 	/* if in the middle of checking the transport then give us more time */
- 	if (conn->ping_task)
--		rc = EH_RESET_TIMER;
-+		rc = BLK_EH_RESET_TIMER;
- done:
- 	spin_unlock(&session->lock);
--	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
-+	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ? "timer reset" : "nh");
- 	return rc;
- }
- 
-diff --git a/fs/nfs/file.c b/fs/nfs/file.c
-index 7846065..30541f0 100644
---- a/fs/nfs/file.c
-+++ b/fs/nfs/file.c
-@@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
- 		file->f_path.dentry->d_name.name,
- 		mapping->host->i_ino, len, (long long) pos);
- 
--	page = __grab_cache_page(mapping, index);
-+	page = grab_cache_page_write_begin(mapping, index, flags);
- 	if (!page)
- 		return -ENOMEM;
- 	*pagep = page;
-diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
-index 108f47e..2389a2e 100644
---- a/include/linux/nfsd/nfsd.h
-+++ b/include/linux/nfsd/nfsd.h
-@@ -85,7 +85,8 @@ __be32		nfsd_setattr(struct svc_rqst *, struct svc_fh *,
- #ifdef CONFIG_NFSD_V4
- __be32          nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
-                     struct nfs4_acl *);
--int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
-+int             nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, 
-+		struct vfsmount *mnt, struct nfs4_acl **);
- #endif /* CONFIG_NFSD_V4 */
- __be32		nfsd_create(struct svc_rqst *, struct svc_fh *,
- 				char *name, int len, struct iattr *attrs,
-diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 18060be..715ff2a 100644
---- a/fs/nfsd/vfs.c
-+++ b/fs/nfsd/vfs.c
-@@ -388,7 +388,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
- 	err = nfserr_notsync;
- 	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
- 		fh_lock(fhp);
--		host_err = notify_change(dentry, iap);
-+		host_err = notify_change(dentry, fhp->fh_export->ex_path.mnt, iap);
- 		err = nfserrno(host_err);
- 		fh_unlock(fhp);
- 	}
-@@ -408,11 +408,12 @@ out_nfserr:
- #if defined(CONFIG_NFSD_V2_ACL) || \
-     defined(CONFIG_NFSD_V3_ACL) || \
-     defined(CONFIG_NFSD_V4)
--static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
-+static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt,
-+			     char *key, void **buf)
- {
- 	ssize_t buflen;
- 
--	buflen = vfs_getxattr(dentry, key, NULL, 0);
-+	buflen = vfs_getxattr(dentry, mnt, key, NULL, 0, NULL);
- 	if (buflen <= 0)
- 		return buflen;
- 
-@@ -420,13 +421,14 @@ static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
- 	if (!*buf)
- 		return -ENOMEM;
- 
--	return vfs_getxattr(dentry, key, *buf, buflen);
-+	return vfs_getxattr(dentry, mnt, key, *buf, buflen, NULL);
- }
- #endif
- 
- #if defined(CONFIG_NFSD_V4)
- static int
--set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
-+set_nfsv4_acl_one(struct dentry *dentry, struct vfsmount *mnt,
-+		  struct posix_acl *pacl, char *key)
- {
- 	int len;
- 	size_t buflen;
-@@ -445,7 +447,7 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
- 		goto out;
- 	}
- 
--	error = vfs_setxattr(dentry, key, buf, len, 0);
-+	error = vfs_setxattr(dentry, mnt, key, buf, len, 0, NULL);
- out:
- 	kfree(buf);
- 	return error;
-@@ -458,6 +460,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	__be32 error;
- 	int host_error;
- 	struct dentry *dentry;
-+	struct vfsmount *mnt;
- 	struct inode *inode;
- 	struct posix_acl *pacl = NULL, *dpacl = NULL;
- 	unsigned int flags = 0;
-@@ -468,6 +471,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		return error;
- 
- 	dentry = fhp->fh_dentry;
-+	mnt = fhp->fh_export->ex_path.mnt;
- 	inode = dentry->d_inode;
- 	if (S_ISDIR(inode->i_mode))
- 		flags = NFS4_ACL_DIR;
-@@ -478,12 +482,14 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	} else if (host_error < 0)
- 		goto out_nfserr;
- 
--	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
-+	host_error = set_nfsv4_acl_one(dentry, mnt, pacl,
-+				       POSIX_ACL_XATTR_ACCESS);
- 	if (host_error < 0)
- 		goto out_release;
- 
- 	if (S_ISDIR(inode->i_mode))
--		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
-+		host_error = set_nfsv4_acl_one(dentry, mnt, dpacl,
-+					       POSIX_ACL_XATTR_DEFAULT);
- 
- out_release:
- 	posix_acl_release(pacl);
-@@ -496,13 +502,13 @@ out_nfserr:
- }
- 
- static struct posix_acl *
--_get_posix_acl(struct dentry *dentry, char *key)
-+_get_posix_acl(struct dentry *dentry, struct vfsmount *mnt, char *key)
- {
- 	void *buf = NULL;
- 	struct posix_acl *pacl = NULL;
- 	int buflen;
- 
--	buflen = nfsd_getxattr(dentry, key, &buf);
-+	buflen = nfsd_getxattr(dentry, mnt, key, &buf);
- 	if (!buflen)
- 		buflen = -ENODATA;
- 	if (buflen <= 0)
-@@ -514,14 +520,15 @@ _get_posix_acl(struct dentry *dentry, char *key)
- }
- 
- int
--nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
-+nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
-+		   struct vfsmount *mnt, struct nfs4_acl **acl)
- {
- 	struct inode *inode = dentry->d_inode;
- 	int error = 0;
- 	struct posix_acl *pacl = NULL, *dpacl = NULL;
- 	unsigned int flags = 0;
- 
--	pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
-+	pacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_ACCESS);
- 	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
- 		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
- 	if (IS_ERR(pacl)) {
-@@ -531,7 +538,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_ac
- 	}
- 
- 	if (S_ISDIR(inode->i_mode)) {
--		dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
-+		dpacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_DEFAULT);
- 		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
- 			dpacl = NULL;
- 		else if (IS_ERR(dpacl)) {
-@@ -944,13 +951,13 @@ out:
- 	return err;
- }
- 
--static void kill_suid(struct dentry *dentry)
-+static void kill_suid(struct dentry *dentry, struct vfsmount *mnt)
- {
- 	struct iattr	ia;
- 	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
- 
- 	mutex_lock(&dentry->d_inode->i_mutex);
--	notify_change(dentry, &ia);
-+	notify_change(dentry, mnt, &ia);
- 	mutex_unlock(&dentry->d_inode->i_mutex);
- }
- 
-@@ -1009,7 +1016,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
- 
- 	/* clear setuid/setgid flag after write */
- 	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
--		kill_suid(dentry);
-+		kill_suid(dentry, exp->ex_path.mnt);
- 
- 	if (host_err >= 0 && stable) {
- 		static ino_t	last_ino;
-@@ -1187,6 +1194,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		int type, dev_t rdev, struct svc_fh *resfhp)
- {
- 	struct dentry	*dentry, *dchild = NULL;
-+	struct svc_export *exp;
- 	struct inode	*dirp;
- 	__be32		err;
- 	__be32		err2;
-@@ -1204,6 +1212,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		goto out;
- 
- 	dentry = fhp->fh_dentry;
-+	exp = fhp->fh_export;
- 	dirp = dentry->d_inode;
- 
- 	err = nfserr_notdir;
-@@ -1220,7 +1229,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		host_err = PTR_ERR(dchild);
- 		if (IS_ERR(dchild))
- 			goto out_nfserr;
--		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
-+		err = fh_compose(resfhp, exp, dchild, fhp);
- 		if (err)
- 			goto out;
- 	} else {
-@@ -1270,13 +1279,14 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
- 		break;
- 	case S_IFDIR:
--		host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
-+		host_err = vfs_mkdir(dirp, dchild, exp->ex_path.mnt, iap->ia_mode);
- 		break;
- 	case S_IFCHR:
- 	case S_IFBLK:
- 	case S_IFIFO:
- 	case S_IFSOCK:
--		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
-+		host_err = vfs_mknod(dirp, dchild, exp->ex_path.mnt,
-+				     iap->ia_mode, rdev);
- 		break;
- 	}
- 	if (host_err < 0) {
-@@ -1284,7 +1294,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		goto out_nfserr;
- 	}
- 
--	if (EX_ISSYNC(fhp->fh_export)) {
-+	if (EX_ISSYNC(exp)) {
- 		err = nfserrno(nfsd_sync_dir(dentry));
- 		write_inode_now(dchild->d_inode, 1);
- 	}
-@@ -1514,6 +1524,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 				struct iattr *iap)
- {
- 	struct dentry	*dentry, *dnew;
-+	struct svc_export *exp;
- 	__be32		err, cerr;
- 	int		host_err;
- 
-@@ -1538,6 +1549,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 	if (host_err)
- 		goto out_nfserr;
- 
-+	exp = fhp->fh_export;
- 	if (unlikely(path[plen] != 0)) {
- 		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
- 		if (path_alloced == NULL)
-@@ -1545,14 +1557,16 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 		else {
- 			strncpy(path_alloced, path, plen);
- 			path_alloced[plen] = 0;
--			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
-+			host_err = vfs_symlink(dentry->d_inode, dnew,
-+					       exp->ex_path.mnt, path_alloced);
- 			kfree(path_alloced);
- 		}
- 	} else
--		host_err = vfs_symlink(dentry->d_inode, dnew, path);
-+		host_err = vfs_symlink(dentry->d_inode, dnew, exp->ex_path.mnt,
-+				       path);
- 
- 	if (!host_err) {
--		if (EX_ISSYNC(fhp->fh_export))
-+		if (EX_ISSYNC(exp))
- 			host_err = nfsd_sync_dir(dentry);
- 	}
- 	err = nfserrno(host_err);
-@@ -1560,7 +1574,7 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
- 
- 	mnt_drop_write(fhp->fh_export->ex_path.mnt);
- 
--	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
-+	cerr = fh_compose(resfhp, exp, dnew, fhp);
- 	dput(dnew);
- 	if (err==0) err = cerr;
- out:
-@@ -1615,7 +1629,8 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
- 		err = nfserrno(host_err);
- 		goto out_dput;
- 	}
--	host_err = vfs_link(dold, dirp, dnew);
-+	host_err = vfs_link(dold, tfhp->fh_export->ex_path.mnt, dirp,
-+			    dnew, ffhp->fh_export->ex_path.mnt);
- 	if (!host_err) {
- 		if (EX_ISSYNC(ffhp->fh_export)) {
- 			err = nfserrno(nfsd_sync_dir(ddir));
-@@ -1716,7 +1731,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
- 	if (host_err)
- 		goto out_dput_new;
- 
--	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
-+	host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt,
-+			      tdir, ndentry, tfhp->fh_export->ex_path.mnt);
- 	if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
- 		host_err = nfsd_sync_dir(tdentry);
- 		if (!host_err)
-@@ -1754,6 +1770,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 				char *fname, int flen)
- {
- 	struct dentry	*dentry, *rdentry;
-+	struct svc_export *exp;
- 	struct inode	*dirp;
- 	__be32		err;
- 	int		host_err;
-@@ -1768,6 +1785,7 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 	fh_lock_nested(fhp, I_MUTEX_PARENT);
- 	dentry = fhp->fh_dentry;
- 	dirp = dentry->d_inode;
-+	exp = fhp->fh_export;
- 
- 	rdentry = lookup_one_len(fname, dentry, flen);
- 	host_err = PTR_ERR(rdentry);
-@@ -1789,21 +1807,21 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
- 
- 	if (type != S_IFDIR) { /* It's UNLINK */
- #ifdef MSNFS
--		if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
-+		if ((exp->ex_flags & NFSEXP_MSNFS) &&
- 			(atomic_read(&rdentry->d_count) > 1)) {
- 			host_err = -EPERM;
- 		} else
- #endif
--		host_err = vfs_unlink(dirp, rdentry);
-+		host_err = vfs_unlink(dirp, rdentry, exp->ex_path.mnt);
- 	} else { /* It's RMDIR */
--		host_err = vfs_rmdir(dirp, rdentry);
-+		host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
- 	}
- 
- 	dput(rdentry);
- 
- 	if (host_err)
- 		goto out_drop;
--	if (EX_ISSYNC(fhp->fh_export))
-+	if (EX_ISSYNC(exp))
- 		host_err = nfsd_sync_dir(dentry);
- 
- out_drop:
-@@ -2036,7 +2054,8 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
- 		return ERR_PTR(-EOPNOTSUPP);
- 	}
- 
--	size = nfsd_getxattr(fhp->fh_dentry, name, &value);
-+	size = nfsd_getxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name,
-+			     &value);
- 	if (size < 0)
- 		return ERR_PTR(size);
- 
-@@ -2048,6 +2067,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, int type)
- int
- nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- {
-+	struct vfsmount *mnt;
- 	struct inode *inode = fhp->fh_dentry->d_inode;
- 	char *name;
- 	void *value = NULL;
-@@ -2080,21 +2100,24 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
- 	} else
- 		size = 0;
- 
--	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
-+	mnt = fhp->fh_export->ex_path.mnt;
-+	error = mnt_want_write(mnt);
- 	if (error)
- 		goto getout;
- 	if (size)
--		error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
-+		error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size, 0,
-+				     NULL);
- 	else {
- 		if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
- 			error = 0;
- 		else {
--			error = vfs_removexattr(fhp->fh_dentry, name);
-+			error = vfs_removexattr(fhp->fh_dentry, mnt, name,
-+						NULL);
- 			if (error == -ENODATA)
- 				error = 0;
- 		}
- 	}
--	mnt_drop_write(fhp->fh_export->ex_path.mnt);
-+	mnt_drop_write(mnt);
- 
- getout:
- 	kfree(value);
-diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 14ba4d9..4fc3121 100644
---- a/fs/nfsd/nfs4xdr.c
-+++ b/fs/nfsd/nfs4xdr.c
-@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
- 	}
- 	if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
- 			| FATTR4_WORD0_SUPPORTED_ATTRS)) {
--		err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
-+		err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt, &acl);
- 		aclsupport = (err == 0);
- 		if (bmval0 & FATTR4_WORD0_ACL) {
- 			if (err == -EOPNOTSUPP)
-diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
-index 145b3c8..2ca394f 100644
---- a/fs/nfsd/nfs4recover.c
-+++ b/fs/nfsd/nfs4recover.c
-@@ -158,7 +158,8 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
- 	status = mnt_want_write(rec_dir.path.mnt);
- 	if (status)
- 		goto out_put;
--	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
-+	status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry,
-+			   rec_dir.path.mnt, S_IRWXU);
- 	mnt_drop_write(rec_dir.path.mnt);
- out_put:
- 	dput(dentry);
-@@ -263,7 +264,7 @@ nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
- 		return -EINVAL;
- 	}
- 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
--	status = vfs_unlink(dir->d_inode, dentry);
-+	status = vfs_unlink(dir->d_inode, dentry, rec_dir.path.mnt);
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	return status;
- }
-@@ -278,7 +279,7 @@ nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
- 	 * a kernel from the future.... */
- 	nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
- 	mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
--	status = vfs_rmdir(dir->d_inode, dentry);
-+	status = vfs_rmdir(dir->d_inode, dentry, rec_dir.path.mnt);
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	return status;
- }



More information about the ewg mailing list